// TRPO_Update.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "omp.h"
#include "TRPO.h"
double TRPO_Update(TRPOparam param, double *Result, const size_t NumThreads) {
//////////////////// Remarks ////////////////////
// Result: Updated Policy Parameters
//////////////////// Read Parameters ////////////////////
// OpenMP Settings
omp_set_num_threads(NumThreads);
// Assign Parameters
const size_t NumLayers = param.NumLayers;
char * AcFunc = param.AcFunc;
size_t * LayerSize = param.LayerSize;
const size_t NumSamples = param.NumSamples;
char * ModelFile = param.ModelFile;
char * DataFile = param.DataFile;
const double CG_Damping = param.CG_Damping;
double ResidualTh = 1e-10;
size_t MaxIter = 10;
double MaxKL = 0.01;
size_t MaxBackTracks = 10;
double AcceptRatio = 0.1;
// Dimension of Observation Space
const size_t ObservSpaceDim = LayerSize[0];
// Dimension of Action Space
const size_t ActionSpaceDim = LayerSize[NumLayers-1];
// Number of Policy Parameters
size_t NumParams = NumParamsCalc(param.LayerSize, param.NumLayers);
// running index used when traversing the parameter and result vectors
size_t pos;
//////////////////// Memory Allocation - Model ////////////////////
// W[i]: Weight Matrix from Layer[i] to Layer[i+1]
// B[i]: Bias Vector from Layer[i] to Layer[i+1]
// Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
// Item B[k] is the bias of Neuron #k in Layer[i+1]
double * W [NumLayers-1];
double * B [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// LogStd[i] is the log of std[i] in the policy
double * LogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Policy Gradient ////////////////////
// The Policy Gradient Vector (PG) is the gradient of Surrogate Loss w.r.t. the policy parameters
// -PG is the input to the Conjugate Gradient (CG) function
// There is one-to-one correspondence between PG and policy parameters (W and B of neural network, LogStd)
double * PGW [NumLayers-1];
double * PGB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
PGW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
PGB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// Allocate Memory for Policy Gradient corresponding to LogStd
double * PGLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Simulation Data ////////////////////
// Allocate Memory for Observation and Probability Mean
// Observ: list of observations - corresponds to ob_no in modular_rl
// Mean: list of probability mean values - corresponds to the 'mean' part of prob_np in modular_rl
// Remarks: due to the specific setting of the experiments in the TRPO paper,
// Std is the same for all samples in each simulation iteration,
// so we just allocate Std memory space for one sample and use it for all samples.
// The general case should be another vector of Std with size NumSamples*ActionSpaceDim
double * Observ = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
double * Mean = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Std = (double *) calloc(ActionSpaceDim, sizeof(double));
double * Action = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Advantage = (double *) calloc(NumSamples, sizeof(double));
//////////////////// Memory Allocation - Ordinary Forward and Backward Propagation ////////////////////
// Layer[i] : Memory of each layer's outputs, i.e. y_i
// GLayer[i]: Gradient of Loss Function w.r.t. the pre-activation values in Layer[i], i.e. d(Loss)/d(x_i)
double * Layer [NumLayers];
double * GLayer [NumLayers];
for (size_t i=0; i<NumLayers; ++i) {
Layer[i] = (double *) calloc(LayerSize[i], sizeof(double));
GLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
}
// GW[i]: Gradient of Loss Function w.r.t. Neural Network Weight W[i]
// GB[i]: Gradient of Loss Function w.r.t. Neural Network Bias B[i]
// There is one-to-one correspondence between: GW[i] and W[i], GB[i] and B[i], GLogStd and LogStd
double * GW [NumLayers-1];
double * GB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
GW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
GB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// GLogStd[i]: Gradient of Loss Function w.r.t. LogStd[i]
double * GLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Pearlmutter Forward and Backward Propagation ////////////////////
// RyLayer[i]: R{} of each layer's outputs, i.e. R{y_i}
// RxLayer[i]: R{} of each layer's pre-activated outputs, i.e. R{x_i}
// RGLayer[i]: R{} Gradient of KL w.r.t. the pre-activation values in Layer[i], i.e. R{d(KL)/d(x_i)}
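// R{} denotes the Pearlmutter R-operator: R{f(theta)} = d/d(eps) f(theta + eps*p) at eps = 0.
// Propagating R{} through one forward and one backward pass yields the Hessian-vector
// (here: Fisher-vector) product without ever forming the matrix explicitly.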
double * RyLayer [NumLayers];
double * RxLayer [NumLayers];
double * RGLayer [NumLayers];
for (size_t i=0; i<NumLayers; ++i) {
RyLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
RxLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
RGLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
}
// RGW[i]: R{} Gradient of KL w.r.t. Neural Network Weight W[i], i.e. R{d(KL)/d(W[i])}
// RGB[i]: R{} Gradient of KL w.r.t. Neural Network Bias B[i], i.e. R{d(KL)/d(B[i])}
// There is one-to-one correspondence between: RGW[i] and W[i], RGB[i] and B[i]
double * RGW [NumLayers-1];
double * RGB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
RGW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
RGB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// RGLogStd[i]: R{} Gradient of KL w.r.t. LogStd[i]
double * RGLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Conjugate Gradient (CG) ////////////////////
// These names correspond to the names in the TRPO Python code
double * b = (double *) calloc(NumParams, sizeof(double));
double * p = (double *) calloc(NumParams, sizeof(double));
double * r = (double *) calloc(NumParams, sizeof(double));
double * x = (double *) calloc(NumParams, sizeof(double));
double * z = (double *) calloc(NumParams, sizeof(double));
//////////////////// Memory Allocation - Line Search ////////////////////
// These names correspond to the names in the TRPO Python code
// Note: In Line Search we also need a vector called x
// Here we just use the x declared for Conjugate Gradient for simplicity
// The x used in Line Search has nothing to do with the x used in CG
// They just have the same type and size
double * fullstep = (double *) calloc(NumParams, sizeof(double));
double * theta = (double *) calloc(NumParams, sizeof(double));
double * xnew = (double *) calloc(NumParams, sizeof(double));
//////////////////// Load Model ////////////////////
// Open Model File that contains Weights, Bias and std
FILE *ModelFilePointer = fopen(ModelFile, "r");
if (ModelFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
return -1;
}
// Read Weights and Bias from file
for (size_t i=0; i<NumLayers-1; ++i) {
// Reading Weights W[i]: from Layer[i] to Layer[i+1]
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
}
}
// Reading Bias B[i]: from Layer[i] to Layer[i+1]
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &B[i][k]);
}
}
// Read LogStd from file
for (size_t k=0; k<ActionSpaceDim; ++k) {
fscanf(ModelFilePointer, "%lf", &LogStd[k]);
}
// Close Model File
fclose(ModelFilePointer);
//////////////////// Load Simulation Data ////////////////////
// Open Data File that contains Mean, std and Observation
FILE *DataFilePointer = fopen(DataFile, "r");
if (DataFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", DataFile);
return -1;
}
// Read Mean, Std and Observation - Note that Std = exp(LogStd)
// Remarks: Std is the same for all samples, and appears in every line in the data file
// so we are writing the same Std again and again to the same place.
for (size_t i=0; i<NumSamples; ++i) {
// Read Mean
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Mean[i*ActionSpaceDim+j]);
}
// Read Std
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Std[j]);
}
// Read Observation
for (size_t j=0; j<ObservSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Observ[i*ObservSpaceDim+j]);
}
// Read Action
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Action[i*ActionSpaceDim+j]);
}
// Read Advantage
fscanf(DataFilePointer, "%lf", &Advantage[i]);
}
// Close Data File
fclose(DataFilePointer);
//////////////////// Main Computation Begins ////////////////////
// Measure Elapsed Time
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
//////////////////// Computing Policy Gradient ////////////////////
for (size_t iter=0; iter<NumSamples; iter++) {
///////// Ordinary Forward Propagation /////////
// Assign Input Values
for (size_t i=0; i<ObservSpaceDim; ++i) Layer[0][i] = Observ[iter*ObservSpaceDim+i];
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
// Propagate from Layer[i] to Layer[i+1]
for (size_t j=0; j<LayerSize[i+1]; ++j) {
// Calculating pre-activated value for item[j] in next layer
Layer[i+1][j] = B[i][j];
for (size_t k=0; k<LayerSize[i]; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
Layer[i+1][j] += Layer[i][k] * W[i][k*LayerSize[i+1]+j];
}
// Apply Activation Function
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function
case 't': {Layer[i+1][j] = tanh(Layer[i+1][j]); break;}
// 0.1x Activation Function
case 'o': {Layer[i+1][j] = 0.1*Layer[i+1][j]; break;}
// sigmoid Activation Function
case 's': {Layer[i+1][j] = 1.0/(1+exp(-Layer[i+1][j])); break;}
// Default: Activation Function not supported
default: {
printf("[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
return -1;
}
}
}
}
///////// Ordinary Backward Propagation /////////
// Gradient Initialisation
// Assign the derivative of Surrogate Loss w.r.t. Mean (output values from the final layer) and LogStd
for (size_t i=0; i<ActionSpaceDim; ++i) {
double temp = (Action[iter*ActionSpaceDim+i] - Mean[iter*ActionSpaceDim+i]) / exp(LogStd[i]);
GLayer[NumLayers-1][i] = Advantage[iter] * temp / exp(LogStd[i]);
GLogStd[i] = Advantage[iter] * (temp * temp - 1);
}
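// Derivation sketch for the two seeds above (diagonal-Gaussian policy, stated for reference):
//   log pi(a|s) = -0.5 * sum_i [ ((a_i - Mean_i)/Std_i)^2 + 2*LogStd_i + log(2*pi) ]
//   d(log pi)/d(Mean_i)   = (a_i - Mean_i)/Std_i^2 = temp / exp(LogStd_i)
//   d(log pi)/d(LogStd_i) = ((a_i - Mean_i)/Std_i)^2 - 1 = temp*temp - 1
// Scaling each by Advantage gives the per-sample policy-gradient contribution.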
// Backward Propagation
for (size_t i=NumLayers-1; i>0; --i) {
// Propagate from Layer[i] to Layer[i-1]
for (size_t j=0; j<LayerSize[i]; ++j) {
// Differentiate the activation function
switch (AcFunc[i]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function: tanh' = 1 - tanh^2
case 't': {GLayer[i][j] = GLayer[i][j] * (1- Layer[i][j] * Layer[i][j]); break;}
// 0.1x Activation Function
case 'o': {GLayer[i][j] = 0.1 * GLayer[i][j]; break;}
// sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
case 's': {GLayer[i][j] = GLayer[i][j] * Layer[i][j] * (1- Layer[i][j]); break;}
// Default: Activation Function not supported
default: {
fprintf(stderr, "[ERROR] Activation Function for Layer[%zu] is %c. Unsupported.\n", i, AcFunc[i]);
return -1;
}
}
// The derivative w.r.t. Bias is the same as that w.r.t. the pre-activated value
GB[i-1][j] = GLayer[i][j];
}
// Calculate the derivative w.r.t. Weight
for (size_t j=0; j<LayerSize[i-1]; ++j) {
for (size_t k=0; k<LayerSize[i]; ++k) {
// The Derivative w.r.t. the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
GW[i-1][j*LayerSize[i]+k] = GLayer[i][k] * Layer[i-1][j];
}
}
// Calculate the derivative w.r.t. the output values from Layer[i]
for (size_t j=0; j<LayerSize[i-1]; ++j) {
GLayer[i-1][j] = 0;
for (size_t k=0; k<LayerSize[i]; ++k) {
// Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
GLayer[i-1][j] += GLayer[i][k] * W[i-1][j*LayerSize[i]+k];
}
}
}
// Accumulate the Policy Gradient to b
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
b[pos] += GW[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
b[pos] += GB[i][k];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
b[pos] += GLogStd[k];
pos++;
}
} // End of iteration over current sample
// Averaging Policy Gradient over the samples - Policy Gradient is held in b
// Note this corresponds to -g in the Python code: b = -g
#pragma omp parallel for
for (size_t i=0; i<pos; ++i) {
b[i] = b[i] / (double)NumSamples;
}
//////////////////// Computing Search Direction ////////////////////
///////// Conjugate Gradient /////////
// This function implements Conjugate Gradient algorithm to solve linear equation Ax=b
// x: The Conjugate Gradient Result, i.e. solution x to Ax=b
// In TRPO context, x is the Step Direction of the line search (stepdir in the Python code)
// b: Vector b in the equation Ax=b
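// Standard linear-CG recurrences used below (A is the damped Fisher matrix, applied implicitly as z = FVP(p)):
//   v  = (r.r) / (p.z)    step length
//   x += v * p            solution update
//   r -= v * z            residual update
//   mu = (r'.r') / (r.r)  direction coefficient
//   p  = r + mu * p       new search direction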
// Initialisation
double rdotr = 0;
for (size_t i=0; i<NumParams; ++i) {
p[i] = b[i];
r[i] = b[i];
rdotr += r[i] * r[i];
}
// Iterative Solver
for (size_t it=0; it<=MaxIter; ++it) {
// Calculate Frobenius Norm of x
double FrobNorm = 0;
#pragma omp parallel for reduction (+:FrobNorm)
for (size_t i=0; i<NumParams; ++i) {
FrobNorm += x[i] * x[i];
}
FrobNorm = sqrt(FrobNorm);
printf("CG Iter[%zu] Residual Norm=%.12e, Soln Norm=%.12e\n", it, rdotr, FrobNorm);
// Check Termination Condition
if (rdotr<ResidualTh || it==MaxIter) {
for (size_t i=0; i<NumParams; ++i) z[i] = x[i];
break;
}
///////// Fisher Vector Product Computation z = FVP(p) /////////
// Init PGW, PGB, PGLogStd from p
// Init z to 0
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
PGW[i][j*nextLayerDim+k] = p[pos];
z[pos] = 0;
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
PGB[i][k] = p[pos];
z[pos] = 0;
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
PGLogStd[k] = p[pos];
z[pos] = 0;
pos++;
}
for (size_t iter=0; iter<NumSamples; iter++) {
///////// Combined Forward Propagation /////////
// Initialise the Input Layer
for (size_t i=0; i<ObservSpaceDim; ++i) {
Layer[0][i] = Observ[iter*ObservSpaceDim+i];
RxLayer[0][i] = 0;
RyLayer[0][i] = 0;
}
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
size_t CurrLayerSize = LayerSize[i];
size_t NextLayerSize = LayerSize[i+1];
size_t j, k;
// Propagate from Layer[i] to Layer[i+1]
#pragma omp parallel for private(j,k) shared(Layer, RxLayer, RyLayer, W, PGW, B, PGB, AcFunc) schedule(static)
for (j=0; j<NextLayerSize; ++j) {
// Initialise x_j and R{x_j} in next layer
// Here we just use y_j's memory space to store x_j temporarily
Layer[i+1][j] = B[i][j];
RxLayer[i+1][j] = PGB[i][j];
for (k=0; k<CurrLayerSize; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
Layer[i+1][j] += Layer[i][k] * W[i][k*NextLayerSize+j];
RxLayer[i+1][j] += RyLayer[i][k] * W[i][k*NextLayerSize+j];
RxLayer[i+1][j] += Layer[i][k] * PGW[i][k*NextLayerSize+j];
}
// Calculate y_j and R{y_j} in next layer. Note that R{y_j} depends on y_j
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {
RyLayer[i+1][j] = RxLayer[i+1][j];
break;
}
// tanh() Activation Function
case 't': {
Layer[i+1][j] = tanh(Layer[i+1][j]);
RyLayer[i+1][j] = RxLayer[i+1][j] * (1 - Layer[i+1][j] * Layer[i+1][j]);
break;
}
// 0.1x Activation Function
case 'o': {
Layer[i+1][j] = 0.1 * Layer[i+1][j];
RyLayer[i+1][j] = 0.1 * RxLayer[i+1][j];
break;
}
// sigmoid Activation Function
case 's': {
Layer[i+1][j] = 1.0 / ( 1 + exp(-Layer[i+1][j]) );
RyLayer[i+1][j] = RxLayer[i+1][j] * Layer[i+1][j] * (1 - Layer[i+1][j]);
break;
}
// Default: Activation Function not supported
default: {
printf("[ERROR] AC Function for Layer[%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
}
}
}
}
///////// Pearlmutter Backward Propagation /////////
// Gradient Initialisation
// Calculating R{} Gradient of KL w.r.t. output values from the final layer, i.e. R{d(KL)/d(mean_i)}
for (size_t i=0; i<ActionSpaceDim; ++i) {
RGLayer[NumLayers-1][i] = RyLayer[NumLayers-1][i] / Std[i] / Std[i];
}
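// For a diagonal Gaussian with the old std held fixed, the Hessian of
// KL(pi_old || pi_new) w.r.t. the mean is diag(1/Std^2), so the Hessian-vector
// product seeds the R{} backward pass with R{mean_i} / Std_i^2.
// (Each LogStd parameter contributes a constant diagonal entry of 2, which is
// why z accumulates 2 * PGLogStd[k] further below.)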
// Backward Propagation
for (size_t i=NumLayers-1; i>0; --i) {
size_t CurrLayerSize = LayerSize[i];
size_t PrevLayerSize = LayerSize[i-1];
size_t j, k;
// Propagate from Layer[i] to Layer[i-1]
#pragma omp parallel for private(j) shared(Layer, RGLayer, RGB) schedule(static)
for (j=0; j<CurrLayerSize; ++j) {
// Calculating R{} Gradient of KL w.r.t. pre-activated values in Layer[i], i.e. R{d(KL)/d(x_i)}
// Differentiate the activation function
switch (AcFunc[i]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function: tanh' = 1 - tanh^2
case 't': {RGLayer[i][j] = (1-Layer[i][j]*Layer[i][j])*RGLayer[i][j]; break;}
// 0.1x Activation Function
case 'o': {RGLayer[i][j] = 0.1 * RGLayer[i][j]; break;}
// sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
case 's': {RGLayer[i][j] = RGLayer[i][j]*Layer[i][j]*(1-Layer[i][j]); break;}
// Default: Activation Function not supported
default: {
fprintf(stderr, "[ERROR] AC Function for Layer [%zu] is %c. Unsupported.\n", i, AcFunc[i]);
}
}
// The R{} derivative w.r.t. Bias is the same as that w.r.t. the pre-activated value
RGB[i-1][j] = RGLayer[i][j];
}
// Calculate the R{} derivative w.r.t. Weight and the output values from Layer[i]
#pragma omp parallel for private(j,k) shared(Layer, RGLayer, W, RGW) schedule(static)
for (j=0; j<PrevLayerSize; ++j) {
double temp = 0;
for (k=0; k<CurrLayerSize; ++k) {
// The R{} Derivative w.r.t. the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
RGW[i-1][j*CurrLayerSize+k] = Layer[i-1][j] * RGLayer[i][k];
// Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
temp += W[i-1][j*CurrLayerSize+k] * RGLayer[i][k];
}
RGLayer[i-1][j] = temp;
}
}
// Accumulate the Fisher-Vector Product to z
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
z[pos] += RGW[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
z[pos] += RGB[i][k];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
z[pos] += 2 * PGLogStd[k];
pos++;
}
} // End of iteration over current sample
// Average the Fisher-vector product over the samples and apply CG damping: z = (F + damping*I) * p
#pragma omp parallel for
for (size_t i=0; i<pos; ++i) {
z[i] = z[i] / (double)NumSamples + CG_Damping * p[i];
}
//////////////// FVP Finish
// Update x and r
double pdotz = 0;
#pragma omp parallel for reduction (+:pdotz)
for (size_t i=0; i<NumParams; ++i) {
pdotz += p[i] * z[i];
}
double v = rdotr / pdotz;
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
x[i] += v * p[i];
r[i] -= v * z[i];
}
// Update p
double newrdotr = 0;
#pragma omp parallel for reduction (+:newrdotr)
for (size_t i=0; i<NumParams; ++i) {
newrdotr += r[i] * r[i];
}
double mu = newrdotr / rdotr;
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
p[i] = r[i] + mu * p[i];
}
// Update rdotr
rdotr = newrdotr;
}
// Compute one final Fisher-vector product z = FVP(x) - this duplicates the FVP block above and could be factored into a helper
///////// Fisher Vector Product Computation z = FVP(x) /////////
// Init PGW, PGB, PGLogStd from x
// Init z to 0
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
PGW[i][j*nextLayerDim+k] = x[pos];
z[pos] = 0;
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
PGB[i][k] = x[pos];
z[pos] = 0;
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
PGLogStd[k] = x[pos];
z[pos] = 0;
pos++;
}
for (size_t iter=0; iter<NumSamples; iter++) {
///////// Combined Forward Propagation /////////
// Initialise the Input Layer
for (size_t i=0; i<ObservSpaceDim; ++i) {
Layer[0][i] = Observ[iter*ObservSpaceDim+i];
RxLayer[0][i] = 0;
RyLayer[0][i] = 0;
}
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
size_t CurrLayerSize = LayerSize[i];
size_t NextLayerSize = LayerSize[i+1];
size_t j, k;
// Propagate from Layer[i] to Layer[i+1]
#pragma omp parallel for private(j,k) shared(Layer, RxLayer, RyLayer, W, PGW, B, PGB, AcFunc) schedule(static)
for (j=0; j<NextLayerSize; ++j) {
// Initialise x_j and R{x_j} in next layer
// Here we just use y_j's memory space to store x_j temporarily
Layer[i+1][j] = B[i][j];
RxLayer[i+1][j] = PGB[i][j];
for (k=0; k<CurrLayerSize; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
Layer[i+1][j] += Layer[i][k] * W[i][k*NextLayerSize+j];
RxLayer[i+1][j] += RyLayer[i][k] * W[i][k*NextLayerSize+j];
RxLayer[i+1][j] += Layer[i][k] * PGW[i][k*NextLayerSize+j];
}
// Calculate y_j and R{y_j} in next layer. Note that R{y_j} depends on y_j
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {
RyLayer[i+1][j] = RxLayer[i+1][j];
break;
}
// tanh() Activation Function
case 't': {
Layer[i+1][j] = tanh(Layer[i+1][j]);
RyLayer[i+1][j] = RxLayer[i+1][j] * (1 - Layer[i+1][j] * Layer[i+1][j]);
break;
}
// 0.1x Activation Function
case 'o': {
Layer[i+1][j] = 0.1 * Layer[i+1][j];
RyLayer[i+1][j] = 0.1 * RxLayer[i+1][j];
break;
}
// sigmoid Activation Function
case 's': {
Layer[i+1][j] = 1.0 / ( 1 + exp(-Layer[i+1][j]) );
RyLayer[i+1][j] = RxLayer[i+1][j] * Layer[i+1][j] * (1 - Layer[i+1][j]);
break;
}
// Default: Activation Function not supported
default: {
printf("[ERROR] AC Function for Layer[%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
}
}
}
}
///////// Pearlmutter Backward Propagation /////////
// Gradient Initialisation
// Calculating R{} Gradient of KL w.r.t. output values from the final layer, i.e. R{d(KL)/d(mean_i)}
for (size_t i=0; i<ActionSpaceDim; ++i) {
RGLayer[NumLayers-1][i] = RyLayer[NumLayers-1][i] / Std[i] / Std[i];
}
// Backward Propagation
for (size_t i=NumLayers-1; i>0; --i) {
size_t CurrLayerSize = LayerSize[i];
size_t PrevLayerSize = LayerSize[i-1];
size_t j, k;
// Propagate from Layer[i] to Layer[i-1]
#pragma omp parallel for private(j) shared(Layer, RGLayer, RGB) schedule(static)
for (j=0; j<CurrLayerSize; ++j) {
// Calculating R{} Gradient of KL w.r.t. pre-activated values in Layer[i], i.e. R{d(KL)/d(x_i)}
// Differentiate the activation function
switch (AcFunc[i]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function: tanh' = 1 - tanh^2
case 't': {RGLayer[i][j] = (1-Layer[i][j]*Layer[i][j])*RGLayer[i][j]; break;}
// 0.1x Activation Function
case 'o': {RGLayer[i][j] = 0.1 * RGLayer[i][j]; break;}
// sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
case 's': {RGLayer[i][j] = RGLayer[i][j]*Layer[i][j]*(1-Layer[i][j]); break;}
// Default: Activation Function not supported
default: {
fprintf(stderr, "[ERROR] AC Function for Layer [%zu] is %c. Unsupported.\n", i, AcFunc[i]);
}
}
// The R{} derivative w.r.t. Bias is the same as that w.r.t. the pre-activated value
RGB[i-1][j] = RGLayer[i][j];
}
// Calculate the R{} derivative w.r.t. Weight and the output values from Layer[i]
#pragma omp parallel for private(j,k) shared(Layer, RGLayer, W, RGW) schedule(static)
for (j=0; j<PrevLayerSize; ++j) {
double temp = 0;
for (k=0; k<CurrLayerSize; ++k) {
// The R{} Derivative w.r.t. the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
RGW[i-1][j*CurrLayerSize+k] = Layer[i-1][j] * RGLayer[i][k];
// Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
temp += W[i-1][j*CurrLayerSize+k] * RGLayer[i][k];
}
RGLayer[i-1][j] = temp;
}
}
// Accumulate the Fisher-Vector Product to z
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
z[pos] += RGW[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
z[pos] += RGB[i][k];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
z[pos] += 2 * PGLogStd[k];
pos++;
}
} // End of iteration over current sample
// Average the Fisher-vector product over the samples and apply CG damping: z = (F + damping*I) * x
#pragma omp parallel for
for (size_t i=0; i<pos; ++i) {
z[i] = z[i] / (double)NumSamples + CG_Damping * x[i];
}
// Now z holds the Fisher Vector Product, x holds stepdir
double shs = 0;
#pragma omp parallel for reduction (+:shs)
for (size_t i=0; i<NumParams; ++i) {
shs += z[i] * x[i];
}
shs = shs * 0.5;
printf("shs: %.14f\n", shs);
// Lagrange Multiplier (lm in Python code)
double lm = sqrt(shs / MaxKL);
// Compute the 2-norm of the Policy Gradient
double gnorm = 0;
for (size_t i=0; i<NumParams; ++i) {
gnorm += b[i] * b[i];
}
gnorm = sqrt(gnorm);
printf("lagrange multiplier: %.14f, gnorm: %.14f\n", lm, gnorm);
// Full Step
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
fullstep[i] = x[i] / lm;
}
// Inner product of Negative Policy Gradient -g and Step Direction
double neggdotstepdir = 0;
#pragma omp parallel for reduction (+:neggdotstepdir)
for (size_t i=0; i<NumParams; ++i) {
neggdotstepdir += b[i] * x[i];
}
//////////////////// Line Search ////////////////////
// theta is initialised to the current model parameters below, once they have
// been copied into x; if Line Search is unsuccessful, theta keeps those parameters
// Expected Improve Rate Line Search = slope dy/dx at initial point
double expected_improve_rate = neggdotstepdir / lm;
// Temporarily Save the Model Parameters in x
// The x refers to the x in linesearch function in Python code
// Note: Although the name is the same, the x here has nothing to do with the x in Conjugate Gradient
// Copy the Model Parameters to x
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
x[pos] = W[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
x[pos] = B[i][k];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
x[pos] = LogStd[k];
pos++;
}
// Init theta to the current model parameters now held in x
// If Line Search is unsuccessful, theta remains these (old) parameters
for (size_t i=0; i<NumParams; ++i) theta[i] = x[i];
// Surrogate Loss of the current Model parameters = -Avg(Advantage)
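// At the current parameters the likelihood ratio is exactly 1 for every sample,
// so the surrogate loss -E[ratio * Advantage] reduces to -mean(Advantage).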
double fval = 0;
#pragma omp parallel for reduction (+:fval)
for (size_t i=0; i<NumSamples; ++i) {
fval += Advantage[i];
}
fval = -fval / (double) NumSamples;
printf("fval before %.14e\n", fval);
// Backtracking Line Search
for (size_t bt=0; bt<MaxBackTracks; ++bt) {
// Step Fraction
double stepfrac = pow(0.5, (double)bt);
// x New
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
xnew[i] = x[i] + stepfrac * fullstep[i];
}
///////// Compute Surrogate Loss /////////
// Init W, B, LogStd from xnew
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
W[i][j*nextLayerDim+k] = xnew[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
B[i][k] = xnew[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
LogStd[k] = xnew[pos];
pos++;
}
// Init Surrogate Loss to 0
double surr = 0;
for (size_t iter=0; iter<NumSamples; iter++) {
///////// Ordinary Forward Propagation /////////
// Assign Input Values
for (size_t i=0; i<ObservSpaceDim; ++i) Layer[0][i] = Observ[iter*ObservSpaceDim+i];
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
// Propagate from Layer[i] to Layer[i+1]
for (size_t j=0; j<LayerSize[i+1]; ++j) {
// Calculating pre-activated value for item[j] in next layer
Layer[i+1][j] = B[i][j];
for (size_t k=0; k<LayerSize[i]; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
Layer[i+1][j] += Layer[i][k] * W[i][k*LayerSize[i+1]+j];
}
// Apply Activation Function
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function
case 't': {Layer[i+1][j] = tanh(Layer[i+1][j]); break;}
// 0.1x Activation Function
case 'o': {Layer[i+1][j] = 0.1*Layer[i+1][j]; break;}
// sigmoid Activation Function
case 's': {Layer[i+1][j] = 1.0/(1+exp(-Layer[i+1][j])); break;}
// Default: Activation Function not supported
default: {
printf("[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
return -1;
}
}
}
}
// Surrogate Loss Calculation
// LoglikelihoodDifference = logp_i - oldlogp_i
// Here, logp_i is derived from xnew (final Layer output and LogStd), oldlogp_i from the simulation data (Mean and Std)
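// For diagonal Gaussians: log p(a) = -0.5*sum_i[ ((a_i-mu_i)/sigma_i)^2 + log(2*pi) ] - sum_i log(sigma_i),
// so logp_i - oldlogp_i = sum_i [ 0.5*(old_z_i^2 - new_z_i^2) + log(Std_i) - LogStd_i ]
// (the log(2*pi) terms cancel); the loop below accumulates exactly this.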
double LoglikelihoodDifference = 0;
for (size_t i=0; i<ActionSpaceDim; ++i) {
double temp_x = (Action[iter*ActionSpaceDim+i] - Mean[iter*ActionSpaceDim+i]) / Std[i];
double temp_xnew = (Action[iter*ActionSpaceDim+i] - Layer[NumLayers-1][i]) / exp(LogStd[i]);
// only the quadratic terms carry the 0.5 factor; the log-std terms enter unscaled
LoglikelihoodDifference += 0.5 * (temp_x*temp_x - temp_xnew*temp_xnew) + log(Std[i]) - LogStd[i];
}
// Accumulate Surrogate Loss
surr += exp(LoglikelihoodDifference) * Advantage[iter];
}
// Average Surrogate Loss over the samples to get newfval
double newfval = -surr / (double) NumSamples;
// Improvement in terms of Surrogate Loss
double actual_improve = fval - newfval;
// Expected Improvement
double expected_improve = expected_improve_rate * stepfrac;
// Improvement Ratio
double ratio = actual_improve / expected_improve;
printf("a/e/r %.14f / %.14f / %.14f\n", actual_improve, expected_improve, ratio);
// Check breaking condition - has Line Search succeeded?
if ( (ratio > AcceptRatio) && (actual_improve > 0) ) {
// If Line Search is successful, update parameters and quit
for (size_t i=0; i<NumParams; ++i) theta[i] = xnew[i];
break;
}
} // End of Line Search
// Copy theta to Result
// Note that these are the updated Model parameters
for (size_t i=0; i<NumParams; ++i) Result[i] = theta[i];
gettimeofday(&tv2, NULL);
double runtimeS = ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
//////////////////// Clean Up ////////////////////
// Model - From Model File
for (size_t i=0; i<NumLayers-1; ++i) {
free(W[i]); free(B[i]);
}
free(LogStd);
// Simulation Data - From Data File
free(Observ); free(Mean); free(Std); free(Action); free(Advantage);
// Forward and Backward Propagation
for (size_t i=0; i<NumLayers; ++i) {
// Ordinary Forward and Backward Propagation
free(Layer[i]); free(GLayer[i]);
// Pearlmutter Forward and Backward Propagation
free(RxLayer[i]); free(RyLayer[i]); free(RGLayer[i]);
}
// Gradient
for (size_t i=0; i<NumLayers-1; ++i) {
// Gradient - temporary storage
free(GW[i]); free(GB[i]);
// Policy Gradient
free(PGW[i]); free(PGB[i]);
// Pearlmutter R{} Gradient
free(RGW[i]); free(RGB[i]);
}
// Gradient - LogStd
free(GLogStd); free(PGLogStd); free(RGLogStd);
// Conjugate Gradient
free(b); free(p); free(r); free(x); free(z);
// Line Search
free(fullstep); free(xnew); free(theta);
return runtimeS;
}
// ParallelClusterCreator.h
#include <memory>
#include <omp.h>
//class for running clustering algorithm on Charts
struct ParallelClusterCreator
{
static uint32_t create_charts(std::map<uint32_t, uint32_t> &chart_id_map,
Polyhedron &P,
const double cost_threshold,
const uint32_t chart_threshold,
CLUSTER_SETTINGS cluster_settings){
std::vector<Chart> charts;
if (chart_id_map.size() == 0)
{
std::cout << "Creating charts from individual faces\n";
//create charts
create_initial_charts(charts, P);
//populate lookup table to allow quicker determination of whether faces are neighbours when making joins
populate_chart_LUT(charts, chart_id_map);
}
else {
std::cout << "Creating charts from grid based initial splitting\n";
uint32_t num_charts = ClusterCreator::initialise_charts_from_grid_clusters(P, chart_id_map, charts, cluster_settings, chart_threshold);
// populate_chart_LUT(charts, chart_id_map);
// check that existing chart number is not already lower than threshold
if (num_charts <= chart_threshold)
{
std::cout << "Input to chart clusterer already had number of charts below chart threshold" << std::endl;
return num_charts;
}
//recalculate perimeters of charts to ensure they are correct
for (auto& chart : charts) {
chart.recalculate_perimeter_from_scratch();
chart.create_neighbour_set(chart_id_map);
}
// return chart_id_map.size();
}
//create join bank vector and queue
std::vector< std::shared_ptr<JoinOperation> > join_queue;
create_joins_from_chart_vector(charts, join_queue, cluster_settings, chart_id_map);
//do the clustering
cluster_faces(charts,join_queue, cost_threshold, chart_threshold,cluster_settings, chart_id_map);
return populate_chart_LUT(charts, chart_id_map);
}
//builds a chart list where each face of a polyhedron is 1 chart
static void
create_initial_charts(std::vector<Chart> &charts,
Polyhedron &P){
//calculate areas of each face
std::cout << "Calculating face areas...\n";
std::map<face_descriptor,double> fareas;
for(face_descriptor fd: faces(P)){
fareas[fd] = CGAL::Polygon_mesh_processing::face_area (fd,P);
}
//calculate normals of each faces
std::cout << "Calculating face normals...\n";
std::map<face_descriptor,Vector> fnormals;
CGAL::Polygon_mesh_processing::compute_face_normals(P,boost::make_assoc_property_map(fnormals));
//get boost face iterator
face_iterator fb_boost, fe_boost;
boost::tie(fb_boost, fe_boost) = faces(P);
//each face begins as its own chart
std::cout << "Creating initial charts...";
for ( Facet_iterator fb = P.facets_begin(); fb != P.facets_end(); ++fb){
//init chart instance for face
Chart c(charts.size(),std::make_shared<Facet>(*fb), fnormals[*fb_boost], fareas[*fb_boost]);
charts.push_back(c);
fb_boost++;
}
std::cout << "..." << charts.size() << " charts.\n";
}
static void
create_joins_from_chart_vector(std::vector<Chart> &charts,
// std::vector<std::shared_ptr<JoinOperation> > &joins,
std::vector< std::shared_ptr<JoinOperation> > &join_queue,
CLUSTER_SETTINGS cluster_settings,
std::map<uint32_t, uint32_t> &chart_id_map){
std::cout << "Creating joins from chart list...";
std::set<uint32_t> processed_charts;
std::set<uint32_t> chart_neighbours;
//for each chart
for (auto& chart : charts)
{
chart_neighbours.clear();
// for each face in chart, find neighbours, add to chart_neighbours set
for (auto& face : chart.facets)
{
//for each edge
Halfedge_facet_circulator fc = face->facet_begin();
do {
if (!fc->is_border() && !(fc->opposite()->is_border()) )//guard against no neighbour at this edge
{
//get chart id of neighbour, add to set if it is not this chart
uint32_t nbr_face_id = fc->opposite()->facet()->id();
uint32_t nbr_chart_id = chart_id_map[nbr_face_id];
if (nbr_chart_id != chart.id){
chart_neighbours.insert(nbr_chart_id);
}
}
} while ( ++fc != face->facet_begin());
}
//create joins...
//if neighbours have not already been processed, create join between this and neighbour
for (auto& nbr_chart_id : chart_neighbours)
{
//make sure it hasn't been processed already
if (processed_charts.find(nbr_chart_id) == processed_charts.end())
{
// chart ids should be equal to their index in the vector at this point
JoinOperation join (chart.id, nbr_chart_id ,JoinOperation::cost_of_join(charts[chart.id],charts[nbr_chart_id], cluster_settings));
join_queue.push_back( std::make_shared<JoinOperation>(join));
}
}
//add this chart to set of processed charts, so that it is not considered for new joins
processed_charts.insert(chart.id);
} // end for each chart
std::cout << join_queue.size() << " joins\n";
}
//takes a list of joins and charts, and executes joins until target number of charts/cost threshold is reached
static void
cluster_faces(std::vector<Chart> &charts,
// std::vector<std::shared_ptr<JoinOperation> > &joins,
std::vector< std::shared_ptr<JoinOperation> >& join_queue,
const double cost_threshold,
const uint32_t chart_threshold,
CLUSTER_SETTINGS &cluster_settings,
std::map<uint32_t, uint32_t> &chart_id_map
){
if (join_queue.empty())
{
std::cout << "ERROR: join_queue is empty - no joins possible" << std::endl;
}
std::cout << "Clustering faces...." << std::endl;
// std::stringstream report;
// report << "--------------------\nReport:\n----------------------\n";
std::vector< std::shared_ptr<JoinOperation> >::iterator it;
//for reporting and calculating when to stop merging
const uint32_t initial_charts = charts.size();
const uint32_t desired_merges = initial_charts - chart_threshold;
uint32_t chart_merges = 0;
int prev_cost_percent = -1;
int prev_charts_percent = -1;
int overall_percent = -1;
//key chart position (in chart vector) :: value - list of pointers to join operations that reference this chart
// std::map<uint32_t, std::vector<std::shared_ptr<JoinOperation> > > chart_to_join_inverse_index;
// populate_inverse_index(chart_to_join_inverse_index, charts, joins);
// join_queue.sort(JoinOperation::sort_join_ptrs);
std::sort(join_queue.begin(),join_queue.end(), JoinOperation::sort_join_ptrs);
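// join_queue is maintained as a cost-sorted vector (cheapest join at the front);
// after each merge the affected joins are removed, re-costed and re-inserted in
// order (see below), which avoids re-sorting the whole queue every iteration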
const double lowest_cost = join_queue.front()->cost;
//execute lowest join cost and update affected joins. re-sort.
std::cout << "Processing join queue...\n";
//note: empty() must be checked before calling front()
while (!join_queue.empty()
&& join_queue.front()->cost < cost_threshold
&& (charts.size() - chart_merges) > chart_threshold){
//reporting-------------
int percent = (int)(((join_queue.front()->cost - lowest_cost) / (cost_threshold - lowest_cost)) * 100);
if (percent != prev_cost_percent && percent > overall_percent) {
prev_cost_percent = percent;
overall_percent = percent;
std::cout << percent << " percent complete (" << chart_merges << " merges done)\n";
}
percent = (int)(((float)chart_merges / (float)desired_merges) * 100);
if (percent != prev_charts_percent && percent > overall_percent) {
prev_charts_percent = percent;
overall_percent = percent;
std::cout << percent << " percent complete (" << chart_merges << " merges done)\n";
}
JoinOperation join_todo = *(join_queue.front());
join_queue.erase(join_queue.begin());
//guard against inactive joins
if (!join_todo.active)
{
continue;
}
//check amount of neighbours resulting chart would have. if too few, skip to next one
if (join_todo.results_in_chart_with_neighbours(charts, chart_id_map) < 3)
{
continue;
}
//guard: chart 2 must still be active before merging (should always hold here)
if (charts[join_todo.get_chart2_id()].active == false)
{
// report << "chart " << join_todo.chart2_id << " was already inactive at merge " << chart_merges << std::endl; // should not happen
continue;
}
//merge faces from chart 2 into chart 1
// std::cout << "merging charts " << join_todo.chart1_id << " and " << join_todo.chart2_id << std::endl;
// charts[join_todo.get_chart1_id()].merge_with(charts[join_todo.get_chart2_id()], join_todo.cost);
charts[join_todo.get_chart1_id()].merge_with(charts[join_todo.get_chart2_id()]);
//deactivate chart 2
charts[join_todo.get_chart2_id()].active = false;
//--------------------------------------------------------------
//update remaining joins that include either of the merged charts
//--------------------------------------------------------------
#if 0
// use inverse index to retrieve the joins that need to be updated
//merge affected join lists from 2 charts involved (from chart 2 to 1)
std::vector<std::shared_ptr<JoinOperation>>& affected_joins = chart_to_join_inverse_index[join_todo.get_chart1_id()];
affected_joins.insert(
affected_joins.end() ,
chart_to_join_inverse_index[join_todo.get_chart2_id()].begin(),
chart_to_join_inverse_index[join_todo.get_chart2_id()].end());
// std::cout << "merged: " << affected_joins.size() << "\n";
std::list<uint32_t> indices_to_remove;
//for each affected join, update or add to list for removal
for (uint32_t i = 0; i < affected_joins.size(); i++){
// std::shared_ptr<JoinOperation> join_op = affected_joins[i];
std::shared_ptr<JoinOperation> join_op ( affected_joins[i] );
//replace expired chart and sorts chart ids
join_op->replace_id_with(join_todo.get_chart2_id(), join_todo.get_chart1_id());
//check if this join is within a chart now - add to removal list
if (join_op->get_chart1_id() == join_op-> get_chart2_id())
{
indices_to_remove.push_back(i);
join_op->active = false;
}
}
// std::cout << "to remove: " << indices_to_remove.size() << "\n";
//remove those not needed any more
indices_to_remove.sort();
int num_removed = 0;
for (auto id : indices_to_remove) {
std::vector< std::shared_ptr<JoinOperation> >::iterator it2 = affected_joins.begin();
// adjust ID to be deleted to account for previously deleted items
std::advance(it2, id - num_removed);
affected_joins.erase(it2);
num_removed++;
}
// std::cout << "after removing: " << affected_joins.size() << "\n";
//remove duplicates in affected joins
auto new_end_of_array = std::unique(affected_joins.begin(), affected_joins.end(), JoinOperation::compare);
affected_joins.resize( std::distance(affected_joins.begin(),new_end_of_array) );
// std::cout << "after removing duplicates: " << affected_joins.size() << "\n";
//recalculate costs for what is left
for (uint32_t i = 0; i < affected_joins.size(); i++){
// std::cout << "join " << i << std::endl;
std::shared_ptr<JoinOperation> join_op ( affected_joins[i] );
// std::cout << "got join " << i << std::endl;
join_op->cost = JoinOperation::cost_of_join(charts[join_op->get_chart1_id()], charts[join_op->get_chart2_id()], cluster_settings);
// std::cout << "costed join " << i << std::endl;
}
// std::cout << "updated\n";
//resort join queue
std::sort(join_queue.begin(),join_queue.end(), JoinOperation::sort_join_ptrs);
// std::cout << "sorted\n";
#else
//old method of updating join list
std::vector<int> to_erase_merged;
std::vector<std::shared_ptr<JoinOperation> > to_recalculate_error_merged;
#pragma omp declare reduction (merge : std::vector<int> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
#pragma omp declare reduction (merge : std::vector< std::shared_ptr<JoinOperation> > : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
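//the two user-declared "merge" reductions above give every OpenMP thread a private
//copy of each vector; on loop exit the per-thread copies are concatenated into the
//shared vectors, so the push_back calls below need no locking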
//find affected joins and add to list for erase/update
#pragma omp parallel for reduction(merge: to_erase_merged,to_recalculate_error_merged)
for(uint32_t j = 0; j < join_queue.size(); j++)
{
//if join is affected, update references and cost
if ( join_queue[j]->get_chart1_id() == join_todo.get_chart1_id()
|| join_queue[j]->get_chart1_id() == join_todo.get_chart2_id()
|| join_queue[j]->get_chart2_id() == join_todo.get_chart1_id()
|| join_queue[j]->get_chart2_id() == join_todo.get_chart2_id() )
{
//eliminate references to joined chart 2 (it is no longer active)
// by pointing them to chart 1
if ( join_queue[j]->get_chart1_id() == join_todo.get_chart2_id()){
join_queue[j]->set_chart1_id( join_todo.get_chart1_id() );
}
if ( join_queue[j]->get_chart2_id() == join_todo.get_chart2_id()){
join_queue[j]->set_chart2_id( join_todo.get_chart1_id() );
}
//save this join to be deleted (and replaced in queue if necessary)
// to_erase[omp_get_thread_num()].push_back(j);
to_erase_merged.push_back(j);
//search for duplicates
if ( join_queue[j]->get_chart1_id() == join_todo.get_chart1_id()
&& join_queue[j]->get_chart2_id() == join_todo.get_chart2_id() ){
// report << "duplicate found : c1 = " << it->chart1_id << ", c2 = " << it->chart2_id << std::endl;
//set inactive
join_queue[j]->active = false;
}
//check for joins within a chart
else if ( join_queue[j]->get_chart1_id() == join_queue[j]->get_chart2_id())
{
// report << "Join found within a chart: " << join_queue[j]->chart1_id << std::endl;
//set inactive
join_queue[j]->active = false;
}
else {
//add (pointer of JO) to vector to be updated
// to_recalculate_error[omp_get_thread_num()].push_back(join_queue[j]);
to_recalculate_error_merged.push_back(join_queue[j]);
}
}
}
//erase all elements that need to be erased (either no longer needed or will be recalculated)
std::sort(to_erase_merged.begin(), to_erase_merged.end());
int num_erased = 0;
for (auto id : to_erase_merged) {
std::vector< std::shared_ptr<JoinOperation> >::iterator it2 = join_queue.begin();
// adjust ID to be deleted to account for previously deleted items
std::advance(it2, id - num_erased);
join_queue.erase(it2);
num_erased++;
}
//recalculate error for joins that need to be updated
#pragma omp parallel for
for(uint32_t j = 0; j < to_recalculate_error_merged.size(); j++){
std::shared_ptr<JoinOperation> join_ptr ( to_recalculate_error_merged[j] );
join_ptr->cost = JoinOperation::cost_of_join(charts[join_ptr->get_chart1_id()], charts[join_ptr->get_chart2_id()], cluster_settings);
}
// replace joins that were filtered out to be sorted
if (to_recalculate_error_merged.size() > 0)
{
std::sort(to_recalculate_error_merged.begin(), to_recalculate_error_merged.end(), JoinOperation::sort_join_ptrs);
std::vector< std::shared_ptr<JoinOperation> >::iterator it2;
uint32_t insert_item = 0;
for (it2 = join_queue.begin(); it2 != join_queue.end(); ++it2){
//insert items while join list item has bigger cost than element to be inserted
while ( insert_item < to_recalculate_error_merged.size() &&
(*it2)->cost > to_recalculate_error_merged[insert_item]->cost){
join_queue.insert(it2, to_recalculate_error_merged[insert_item]);
insert_item++;
}
//if all items are in place, we are done
if (insert_item >= to_recalculate_error_merged.size())
{
break;
}
}
//add any remaining items to end of queue
for (uint32_t i = insert_item; i < to_recalculate_error_merged.size(); i++){
join_queue.push_back(to_recalculate_error_merged[i]);
}
}
#endif
chart_merges++;
}
std::cout << "--------------------\nCharts:\n----------------------\n";
uint32_t total_faces = 0;
uint32_t total_active_charts = 0;
for (uint32_t i = 0; i < charts.size(); ++i)
{
if (charts[i].active)
{
uint32_t num_faces = charts[i].facets.size();
total_faces += num_faces;
total_active_charts++;
}
}
if (!join_queue.empty()) {
std::cout << "joins remaining: " << join_queue.size() << std::endl;
std::cout << "Cost of cheapest un-executed join: " << join_queue.front()->cost << std::endl;
}
else {
std::cout << "join list empty" << std::endl;
}
std::cout << "Total number of faces in charts = " << total_faces << std::endl;
std::cout << "Initial charts = " << charts.size() << std::endl;
std::cout << "Total number merges = " << chart_merges << std::endl;
std::cout << "Total active charts = " << total_active_charts << std::endl;
// std::cout << report.str();
}
//fill chart_id_map from chart vector
static uint32_t populate_chart_LUT(std::vector<Chart> &charts, std::map<uint32_t, uint32_t> &chart_id_map){
chart_id_map.clear();
//populate LUT for face to chart mapping
//count charts on the way to apply new chart ids
uint32_t active_charts = 0;
for (uint32_t id = 0; id < charts.size(); ++id) {
auto& chart = charts[id];
if (chart.active) {
for (auto& f : chart.facets) {
chart_id_map[f->id()] = active_charts;
}
active_charts++;
}
}
return active_charts;
}
//fills inverse index linking each chart with joins that reference it
static void populate_inverse_index( std::map<uint32_t, std::vector<std::shared_ptr<JoinOperation> > > &chart_to_join_inverse_index,
std::vector<Chart> &charts,
std::vector<std::shared_ptr<JoinOperation> > &joins){
if (charts.size() == 0)
{
std::cout << "WARNING: no charts received in populate_inverse_index() \n";
return;
}
if (joins.size() == 0)
{
std::cout << "WARNING: no joins received in populate_inverse_index() \n";
return;
}
if (chart_to_join_inverse_index.size() == 0)
{
std::cout << "building inverse index from scratch...";
//initialise map?
// for (int i = 0; i < charts.size())
}
//for each join, add a pointer to the list for each relevant chart
for (uint32_t i = 0; i < joins.size(); i++){
// chart_to_join_inverse_index[joins[i].get_chart1_id()].push_back( &(joins[i]) );
// chart_to_join_inverse_index[joins[i].get_chart2_id()].push_back( &(joins[i]) );
chart_to_join_inverse_index[joins[i]->get_chart1_id()].push_back( std::shared_ptr<JoinOperation>( joins[i] ) );
chart_to_join_inverse_index[joins[i]->get_chart2_id()].push_back( std::shared_ptr<JoinOperation>( joins[i] ) );
}
std::cout << "Inverse index populated with " << chart_to_join_inverse_index.size() << " entries\n";
//debug only - checking inverse index was created correctly
// for(auto& entry : chart_to_join_inverse_index){
// if(entry.second.size() == 0)
// std::cout << "Chart with no joins: " << entry.first << std::endl;
// }
}
};
// -*- C++ -*-
// omp_loop_static.h
// Copyright (C) 2007-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/omp_loop_static.h
* @brief Parallelization of embarrassingly parallel execution by
* means of an OpenMP for loop with static scheduling.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H
#define _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H 1
#include <omp.h>
#include <parallel/settings.h>
#include <parallel/basic_iterator.h>
namespace __gnu_parallel
{
/** @brief Embarrassingly parallel algorithm for random access
* iterators, using an OpenMP for loop with static scheduling.
*
* @param __begin Begin iterator of element sequence.
* @param __end End iterator of element sequence.
* @param __o User-supplied functor (comparator, predicate, adding
* functor, ...).
* @param __f Functor to @a process an element with __op (depends on
* desired functionality, e. g. for std::for_each(), ...).
* @param __r Functor to @a add a single __result to the already processed
* __elements (depends on functionality).
* @param __base Base value for reduction.
* @param __output Pointer to position where final result is written to
* @param __bound Maximum number of elements processed (e. g. for
* std::count_n()).
* @return User-supplied functor (that may contain a part of the result).
*/
template<typename _RAIter,
typename _Op,
typename _Fu,
typename _Red,
typename _Result>
_Op
__for_each_template_random_access_omp_loop_static(_RAIter __begin,
_RAIter __end, _Op __o,
_Fu& __f, _Red __r,
_Result __base,
_Result& __output,
typename std::iterator_traits<_RAIter>::difference_type __bound)
{
typedef typename std::iterator_traits<_RAIter>::difference_type
_DifferenceType;
_DifferenceType __length = __end - __begin;
_ThreadIndex __num_threads = std::min<_DifferenceType>
(__get_max_threads(), __length);
_Result *__thread_results;
# pragma omp parallel num_threads(__num_threads)
{
# pragma omp single
{
__num_threads = omp_get_num_threads();
__thread_results = new _Result[__num_threads];
for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
__thread_results[__i] = _Result();
}
_ThreadIndex __iam = omp_get_thread_num();
#pragma omp for schedule(static, _Settings::get().workstealing_chunk_size)
for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
__thread_results[__iam] = __r(__thread_results[__iam],
__f(__o, __begin+__pos));
} //parallel
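// Each thread accumulated into its own __thread_results slot; the partial
// results are combined sequentially here, so no synchronisation on __r is
// needed inside the parallel loop.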
for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
__output = __r(__output, __thread_results[__i]);
delete [] __thread_results;
// Points to last element processed (needed as return value for
// some algorithms like transform).
__f.finish_iterator = __begin + __length;
return __o;
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H */
/** @file oyranos_cmm_oyra_image_channel.c
*
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2016 (C) Kai-Uwe Behrmann
*
* @brief Channel selection module for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
* @since 2016/04/04
*/
#include "oyCMMapi4_s.h"
#include "oyCMMapi7_s.h"
#include "oyCMMui_s.h"
#include "oyConnectorImaging_s.h"
#include "oyRectangle_s.h"
#include "oyRectangle_s_.h"
#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_db.h"
#include "oyranos_helper.h"
#include "oyranos_i18n.h"
#include "oyranos_string.h"
#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_POSIX
#include <stdint.h> /* UINT32_MAX */
#endif
#include <locale.h>
/* OY_IMAGE_CHANNEL_REGISTRATION */
/* OY_IMAGE_CHANNEL_REGISTRATION ----------------------------------------------*/
/** @func oyraFilter_ImageChannelRun
* @brief implement oyCMMFilter_GetNext_f()
*
* The "channel" option is build of channel fields. It contains the
* output section in one text string each in squared brackets: "[a|b|c]".
* Each channel is separated by pipe sign '|' and can contain the channel
* symbol or a fill value. -1 indicates the module shall select a appropriate
* fill value. The counting of channels starts from a and ends with z,
* covering the range of ASCII a-z. A special case is a "" no op signature.
* Use it for pass through.
*
* With the above syntax it is possible to add or remove channels or simply
* switch channels off.
*
* switch the second and third channels off: ["a", -1, -1]
*
* swap the first with the third channel: ["c", "b", "a"]
*
* duplicate the second channel and skip the first and possibly the c and
* more source channels: ["b", "b"]
*
* Note: changing the channel count might require a new ICC profile for the
* output image. Please setup the graph accordingly.
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2016/04/04 (Oyranos: 0.9.6)
*/
int oyraFilter_ImageChannelRun ( oyFilterPlug_s * requestor_plug,
oyPixelAccess_s * ticket )
{
int result = 0, error = 0;
oyFilterSocket_s * socket;
oyFilterNode_s * input_node = 0,
* node;
oyFilterPlug_s * plug = NULL;
oyImage_s * image;
int dirty = 0;
socket = oyFilterPlug_GetSocket( requestor_plug );
node = oyFilterSocket_GetNode( socket );
image = (oyImage_s*)oyFilterSocket_GetData( socket );
if(!image)
{
result = 1;
goto oyraFilter_ImageChannelRun_clean;
}
if(oy_debug)
oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
"image [%d](%d)\n",OY_DBG_ARGS_,oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image) );
{
const char * channels_json;
oyOptions_s * node_opts = oyFilterNode_GetOptions( node, 0 );
oyjl_val json = 0;
char * t;
if(!node_opts)
dirty = 1;
if(dirty)
{
result = dirty;
goto oyraFilter_ImageChannelRun_clean2;
}
plug = oyFilterNode_GetPlug( node, 0 );
/* select node */
input_node = oyFilterNode_GetPlugNode( node, 0 );
/* find filters own channel factor */
channels_json = oyOptions_FindString( node_opts,
"//" OY_TYPE_STD "/channel/channel",
0 );
oyOptions_Release( &node_opts );
error = !channels_json;
if(error) {WARNc_S("no \"channel\" option found for filter");}
else if(oy_debug)
oyra_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
"channels_json: \"%s\"",OY_DBG_ARGS_, channels_json);
if(!error && strlen(channels_json))
{
char * save_locale = 0;
/* sensible parsing */
save_locale = oyStringCopy_( setlocale( LC_NUMERIC, 0 ),
oyAllocateFunc_ );
setlocale( LC_NUMERIC, "C" );
t = oyAllocateFunc_(256);
json = oyjl_tree_parse( channels_json, t, 256 );
if(t[0])
{
WARNc2_S( "channel option: %s: %s\n", _("found issues parsing JSON"), t );
error = 1;
}
oyFree_m_(t);
setlocale(LC_NUMERIC, save_locale);
if(save_locale)
oyFree_m_( save_locale );
}
if(oy_debug > 2)
oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
"%s",OY_DBG_ARGS_, oyPixelAccess_Show(ticket));
if(channels_json && strlen(channels_json) > 2)
{
oyImage_s * output_image = oyPixelAccess_GetOutputImage( ticket );
oyArray2d_s * a_dest = oyPixelAccess_GetArray( ticket );
int layout_src = oyImage_GetPixelLayout( image, oyLAYOUT );
int layout_dst = oyImage_GetPixelLayout( output_image, oyLAYOUT );
int channels_src = oyToChannels_m( layout_src );
int channels_dst = oyToChannels_m( layout_dst );
int ticket_array_pix_width;
int count = oyjl_value_count( json ), i;
const int max_channels = 'z'-'a'+1;
double channel[max_channels+1];
int channel_pos[max_channels+1];
/* avoid division by zero */
if(!channels_src) channels_src = 1;
if(!channels_dst) channels_dst = 1;
ticket_array_pix_width = oyArray2d_GetWidth( a_dest ) / channels_dst;
memset( channel, 0, sizeof(double) * (max_channels+1) );
memset( channel_pos, 0, sizeof(int) * (max_channels+1) );
if(count > channels_dst)
{
WARNc3_S( "\"channel=%s\" option channel count %d exceeds destination image %d", channels_json, count, channels_dst );
error = 1;
}
/* parse the "channel" option as JSON string */
if(!error)
for(i = 0; i < count && !error; ++i)
{
oyjl_val v = oyjl_value_pos_get( json, i );
if( OYJL_IS_NUMBER(v) ||
OYJL_IS_DOUBLE(v) )
{
channel[i] = OYJL_GET_DOUBLE( v );
if(channel[i] == -1)
channel[i] = 0.5;
channel_pos[i] = -1;
} else if( OYJL_IS_STRING( v ) )
{
const char * p = OYJL_GET_STRING( v );
channel_pos[i] = p[0] - 'a';
if(channel_pos[i] >= channels_src)
{
WARNc2_S( "channel position %d not available in source image %d", channel_pos[i], channels_src );
error = 1;
}
}
}
oyjl_tree_free( json );
if(!error)
{
int w,h,x,y, start_x,start_y, max_value = -1;
oyRectangle_s * ticket_roi = oyPixelAccess_GetArrayROI( ticket );
oyRectangle_s_ roi_= {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
oyRectangle_s * roi = (oyRectangle_s*)&roi_;
oyArray2d_s * array_out;
uint8_t ** array_out_data;
/* get pixel layout infos for copying */
oyDATATYPE_e data_type_out = oyToDataType_m( layout_dst );
int bps_out = oyDataTypeGetSize( data_type_out );
/* get the source pixels */
result = oyFilterNode_Run( input_node, plug, ticket );
/* get the channel buffers */
array_out = oyPixelAccess_GetArray( ticket );
array_out_data = oyArray2d_GetData( array_out );
w = oyArray2d_GetWidth( array_out ) / channels_dst;
h = oyArray2d_GetHeight( array_out );
switch(data_type_out)
{
case oyUINT8:
max_value = 255;
break;
case oyUINT16:
max_value = 65535;
break;
case oyUINT32:
max_value = UINT32_MAX;
break;
case oyHALF:
case oyFLOAT:
case oyDOUBLE:
max_value = 1.0;
break;
}
oyRectangle_SetByRectangle( roi, ticket_roi );
oyRectangle_Scale( roi, ticket_array_pix_width );
start_x = OY_ROUND(roi_.x);
start_y = OY_ROUND(roi_.y);
/* copy the channels */
#if defined(USE_OPENMP)
#pragma omp parallel for private(x,y,i)
#endif
for(y = start_y; y < h; ++y)
{
for(x = start_x; x < w; ++x)
{
union u8421 { uint32_t u4; uint16_t u2; uint8_t u1; float f; double d; };
union u8421 cache[max_channels];
float flt;
uint32_t u4;
/* fill the intermediate pixel cache;
* It is not known which channels are needed and in which order.
* Thus all channels are stored outside the main buffer.
*/
for(i = 0; i < count; ++i)
{
int pos = (channel_pos[i] == -1) ? i : channel_pos[i];
switch(data_type_out)
{
case oyUINT8:
cache[i].u1 = (channel_pos[i] == -1) ? OY_ROUND(channel[i] * max_value) : array_out_data[y][x*channels_dst*bps_out + pos*bps_out];
break;
case oyUINT16:
cache[i].u2 = (channel_pos[i] == -1) ? OY_ROUND(channel[i] * max_value) : *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
case oyUINT32:
cache[i].u4 = (channel_pos[i] == -1) ? OY_ROUND(channel[i] * max_value) : *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
case oyHALF:
flt = channel[i] * max_value;
memcpy( &u4, &flt, 4 );
cache[i].u2 = (channel_pos[i] == -1) ? OY_FLOAT2HALF(u4) : *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
case oyFLOAT:
cache[i].f = (channel_pos[i] == -1) ? channel[i] * max_value : *((float*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
case oyDOUBLE:
cache[i].d = (channel_pos[i] == -1) ? channel[i] * max_value : *((double*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
}
}
/* read back all scattered channels */
for(i = 0; i < count; ++i)
{
int pos = i;
switch(data_type_out)
{
case oyUINT8:
array_out_data[y][x*channels_dst*bps_out + i*bps_out] = cache[pos].u1;
break;
case oyUINT16:
*((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].u2;
break;
case oyUINT32:
*((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].u4;
break;
case oyHALF:
*((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].u2;
break;
case oyFLOAT:
*((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].f;
break;
case oyDOUBLE:
*((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].d;
break;
}
}
}
}
oyArray2d_Release( &array_out );
}
oyImage_Release( &output_image );
} else /* nothing to do */
result = oyFilterNode_Run( input_node, plug, ticket );
oyraFilter_ImageChannelRun_clean2:
oyFilterPlug_Release( &plug );
oyFilterNode_Release( &input_node );
}
oyraFilter_ImageChannelRun_clean:
oyImage_Release( &image );
oyFilterSocket_Release( &socket );
oyFilterNode_Release( &node );
return result;
}
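/* Illustrative sketch (hypothetical helper, not part of the module): how a
 * parsed "channel" spec maps destination channels to source channels or fill
 * values. Letters select source channels ('a' = first), plain numbers request
 * a constant fill, and -1 lets the module pick a fill value (0.5 above). */
static void channel_map_sketch ( const int * channel_pos, /* -1 means fill */
                                 const double * fill, /* fill values 0.0-1.0 */
                                 const double * src_pixel,
                                 double * dst_pixel,
                                 int count )
{
  int i;
  for(i = 0; i < count; ++i)
    dst_pixel[i] = (channel_pos[i] == -1)
                   ? fill[i]                    /* constant fill value */
                   : src_pixel[channel_pos[i]]; /* copy the selected source channel */
}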
#define OY_IMAGE_CHANNEL_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "channel"
/** @instance oyra_api7
* @brief oyra oyCMMapi7_s implementation
*
* a filter providing a channel image filter
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2016/04/04 (Oyranos: 0.9.6)
*/
oyCMMapi_s * oyraApi7ImageChannelCreate(void)
{
oyCMMapi7_s * channel7;
int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
static oyDATATYPE_e data_types[7] = {oyUINT8, oyUINT16, oyUINT32,
oyHALF, oyFLOAT, oyDOUBLE, 0};
oyConnectorImaging_s * plug = oyConnectorImaging_New(0),
* socket = oyConnectorImaging_New(0);
static oyConnectorImaging_s * plugs[2] = {0,0},
* sockets[2] = {0,0};
plugs[0] = plug;
sockets[0] = socket;
oyConnectorImaging_SetDataTypes( plug, data_types, 6 );
oyConnectorImaging_SetReg( plug, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( plug, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( plug, oyCMMgetImageConnectorPlugText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( plug, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_ID, 1 );
oyConnectorImaging_SetDataTypes( socket, data_types, 6 );
oyConnectorImaging_SetReg( socket, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( socket, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( socket, oyCMMgetImageConnectorSocketText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( socket, 0 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_ID, 1 );
channel7 = oyCMMapi7_Create ( oyraCMMInit, oyraCMMMessageFuncSet,
OY_IMAGE_CHANNEL_REGISTRATION,
cmm_version, module_api,
NULL,
oyraFilter_ImageChannelRun,
(oyConnector_s**)plugs, 1, 0,
(oyConnector_s**)sockets, 1, 0,
0, 0 );
return (oyCMMapi_s*) channel7;
}
const char * oyraApi4UiImageChannelGetText (
const char * select,
oyNAME_e type,
oyStruct_s * context OY_UNUSED )
{
if(strcmp(select,"name") == 0)
{
if(type == oyNAME_NICK)
return "image_channel";
else if(type == oyNAME_NAME)
return _("Image[channel]");
else if(type == oyNAME_DESCRIPTION)
return _("Channel Image Filter Object");
} else if(strcmp(select,"help") == 0)
{
if(type == oyNAME_NICK)
return "help";
else if(type == oyNAME_NAME)
return _("The filter is used to reduce pixels.");
else if(type == oyNAME_DESCRIPTION)
{
static char * help_desc = NULL;
if(!help_desc)
oyStringAddPrintf( &help_desc, 0,0, "%s\n",
_("The filter will expect a \"channel\" double option and will create, fill and process a according data version with a new job ticket. The new job tickets image, array and output_array_roi will be divided by the supplied \"channel\" factor. It's plug will request the divided image sizes from the source socket.") );
return help_desc;
}
} else if(strcmp(select,"category") == 0)
{
if(type == oyNAME_NICK)
return "category";
else if(type == oyNAME_NAME)
return _("Image/Simple Image[channel]");
else if(type == oyNAME_DESCRIPTION)
return _("The filter is used to reduce pixels.");
}
return 0;
}
/** @instance oyra_api4
* @brief oyra oyCMMapi4_s implementation
*
* a filter providing a channel image filter
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2016/04/04 (Oyranos: 0.9.6)
*/
oyCMMapi_s * oyraApi4ImageChannelCreate(void)
{
static const char * oyra_api4_ui_image_channel_texts[] = {"name", "help", "category", 0};
oyCMMui_s * ui = oyCMMui_Create( "Image/Simple Image[channel]", /* category */
oyraApi4UiImageChannelGetText,
oyra_api4_ui_image_channel_texts, 0 );
int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
oyCMMapi4_s * channel4 = oyCMMapi4_Create( oyraCMMInit, oyraCMMMessageFuncSet,
OY_IMAGE_CHANNEL_REGISTRATION,
cmm_version, module_api,
NULL,
NULL,
NULL,
ui,
NULL );
return (oyCMMapi_s*)channel4;
}
/* OY_IMAGE_CHANNEL_REGISTRATION ----------------------------------------------*/
/* ---------------------------------------------------------------------------*/
|
agilekeychain_fmt_plug.c | /* 1Password Agile Keychain cracker patch for JtR. Hacked together during
* July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
 * This software is based on the "agilekeychain" project, but no actual code
 * is borrowed from it.
 *
 * The "agilekeychain" project is at https://bitbucket.org/gwik/agilekeychain
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_agile_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_agile_keychain);
#else
#include <string.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1 // tuned on core i7
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "johnswap.h"
#include "options.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"
#include "jumbo.h"
#include "memdbg.h"
#define FORMAT_LABEL "agilekeychain"
#define FORMAT_NAME "1Password Agile Keychain"
#define FORMAT_TAG "$agilekeychain$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SALTLEN 8
#define IVLEN 8
#define CTLEN 1040
static struct fmt_tests agile_keychain_tests[] = {
{"$agilekeychain$2*1000*8*7146eaa1cca395e5*1040*e7eb81496717d35f12b83024bb055dec00ea82843886cbb8d0d77302a85d89b1d2c0b5b8275dca44c168cba310344be6eea3a79d559d0846a9501f4a012d32b655047673ef66215fc2eb4e944a9856130ee7cd44523017bbbe2957e6a81d1fd128434e7b83b49b8a014a3e413a1d76b109746468070f03f19d361a21c712ef88e05b04f8359f6dd96c1c4487ea2c9df22ea9029e9bc8406d37850a5ead03062283a42218c134d05ba40cddfe46799c931291ec238ee4c11dc71d2b7e018617d4a2bf95a0c3c1f98ea14f886d94ee2a65871418c7c237f1fe52d3e176f8ddab6dfd4bc039b6af36ab1bc9981689c391e71703e31979f732110b84d5fccccf59c918dfcf848fcd80c6da62ced6e231497b9cbef22d5edca439888556bae5e7b05571ac34ea54fafc03fb93e4bc17264e50a1d04b688fcc8bc715dd237086c2537c32de34bbb8a29de0208800af2a9b561551ae6561099beb61045f22dbe871fab5350e40577dd58b4c8fb1232f3f85b8d2e028e5535fd131988a5df4c0408929b8eac6d751dcc698aa1d79603251d90a216ae5e28bffc0610f61fefe0a23148dcc65ab88b117dd3b8d311157424867eb0261b8b8c5b11def85d434dd4c6dc7036822a279a77ec640b28da164bea7abf8b634ba0e4a13d9a31fdcfebbdbe53adcdf2564d656e64923f76bc2619428abdb0056ce20f47f3ece7d4d11dc55d2969684ca336725561cb27ce0504d57c88a2782daccefb7862b385d494ce70fef93d68e673b12a68ba5b8c93702be832d588ac935dbf0a7b332e42d1b6da5f87aed03498a37bb41fc78fcdbe8fe1f999fe756edf3a375beb54dd508ec45af07985f1430a105e552d9817106ae12d09906c4c28af575d270308a950d05c07da348f59571184088d46bbef3e7a2ad03713e90b435547b23f340f0f5d00149838d9919d40dac9b337920c7e577647fe4e2811f05b8e888e3211d9987cf922883aa6e53a756e579f7dff91c297fcc5cda7d10344545f64099cfd2f8fd59ee5c580ca97cf8b17e0222b764df25a2a52b81ee9db41b3c296fcea1203b367e55d321c3504aeda8913b0cae106ccf736991030088d581468264b8486968e868a44172ad904d97e3e52e8370aaf52732e6ee6cc46eb33a901afc6b7c687b8f6ce0b2b4cdfe19c7139615195a052051becf39383ab83699a383a26f8a36c78887fe27ea7588c0ea21a27357ff9923a3d23ca2fb04ad671b63f8a8ec9b7fc969d3bece0f5ff19a40bc327b9905a6de2193ffe3aa1997e9266205d083776e3b94869164abcdb88d64b8ee5465f7165b75e1632abd364a24bb1426889955b8f0354f75c6fb40e254f7de53d8ef7fee9644bf2ebccd934a72bb1cc9c19d354d66996acbddd60d1241657359d9074a4b313b21af2ee4f10cf20f4122a5fad4ee4f37a682ffb7234bea61985d1ad130bfb9f4714461fb574dbf851c*1000*8*c05f3bc3e7f3cad7*1040*f3e3d091b64da1529b04b2795898b717faad59f7dae4bda25e6e267c28a56a7702e51991b2a3fb034cdda2d9bfd531dfd2c3af00f39fdfe8bcbdde02ab790415bcf071d133b15f647f55ff512730ae4914ce20b72184c827f6350ac768b00c9eab0e3322e084bb3e9e9439a10030950f5504dcc4f7ba614b27fde99bd0d743a58341e90ec313395486eb8068df205b7bdf25134ed97dd2e2883d7eb3e63b659602ada765084a69d7ed8fc55b60aa67718cc9e5bf31ab8f3029b32a4b001071848d2b76b5f4b921d2169ca287e9e78ecd904d040c817c7c7cde4ba8510b462e139c16519962ca0adb7d5f89d431cd4541a9a7aaec8d799697f4d3947d87884bed32ada13db725c72ab6450ac8fe989a94917cca784bcf6ffbe756f19d4e8897e0f80d8c318e13e5b30fc356646aaf038a952b0781f12dfef1f4bd6922ae05a573eeff4dbb064cfbb0fd62962a6a53a8de308da2b8e83baebfe261cb127f874a5eff3f05cda123ab2ba559cf444ce33b6845f4c902733b8982044151a8aa1859769082ade5928f2d4f616ce972ae8dde1f2be37d496ad16057008dfe678c75cbdc53db25ed311edbcf8b2a73bcd2809f6bd1d389aaeed82a75fa15676d08aa5390efdc189c180be6a52ec5a7371304d26e477039197671377d1ea3d6ee41e68a42348a4fe9a1d2400eaeba8ed0a7419b9694d780456d96378c00318a5be0f41afa887476b3bebb7cf30d61ca8fc77de35671a3053a517aa39444e01e1752da3146dc97eec5849d6f025c3d4bc6e0499b901f629d8a081ad35ed33602cbef5e9a68f090170fcc1f285eb094e3dc619740a067fd2aeeb20abbb17926c3ad097f3f0bad4de540d1829a985cd7e700100622ec47da046071c11a1597e5f093268b4ed79ffcf2450b9ba2b649b932fbce912bdb4da010581bd9c731be792c8f75177f6c8c4e1756d63a1491a8aae4bb11beeca118e7d08073b500dd82b81
e4bdbeb15625afca8f1c8e06b2360da972587516ef62e91d1d9aad90e62226d53363bff318f5af21f69c234731ac22b09506a1b807d2366e88905668d960c7963daa93046e9a56db1d7a437e9a37aa7a2945197265478b264ec14d383030ef73504fd26d4be9e72ebddb14a00bf6bd66a3adaa1d17cada378a2b0bc852f961af52333f7966f8a60738dfd47e79ce537082f187117ffd31f54f53356b671154dfa245671c4cd054c1a8d303a202fccfae6d3f9e3646838cef38703b5e660b5ce7679f5898d801908f90092dbec335c98e4002041287fe9bfa7d7828a29ab240ec2cedc9fa12cfd7c3ef7b61dad4fbf2ef9c0a904dbde1b3792fb5178607608dc9fc2fbc85addf89fa3df94317e729810b508356b5bb176cdb022afb0ec5eeff4d5081b66733d1be1b54cc4f080bfc33187663b5ab185472b35dc8812e201472e6af376c43ee23aa2db6cd04bddd79b99b0c28c48a5ae", "openwall"},
{"$agilekeychain$1*1000*8*54434b3047723444*1040*316539685a36617546544a61466e35743970356559624464304467394a4a41615459594a6b66454c5462417a7a694b5751474e4748595036344f3945374b414b676b6b7278673658794e63734a316c48656b496a3156346a544c6861797537347032466b4d6b416d31704a6b5063547a44703152544f72696e6e38347732597672774f6476414c70346462595a7678656b6e5958716b7a61746d5874514e575965564735627a437578584e4a573050567939413073306c377a4d726e6d576a6655424455394f4934696c48454f4d536e635567393950686d4171364f76747749446130454c6d74783069704d30456d45374f56736e486a5534667877327a526e52596e55454452393544437042646e6739355938714836584968664c4d7a726a4f63544c6858385141464c71565463664270493761664d633055447879613169456a72664479346438305641417054754775477a475266766c4774543668673848624d31636c37624e73743549634457655375507138535139396c4c39364c4f6f757a43305535586161364b47676a61713971394459526a78744e547459797a6a57715a3575534364487a4430306d4e4e39483277674c733238726463616d4f5146467957374234727252774b6d6161664b6d67414d5854496444665848684c376c6c776d47477a4b57566d5a3646346e775441446f3659745038646d336b6370494d50676742797a41325630716e794833793237494152496477556e4d6c4751497367346672635364486e6e71504f6e6264575953584462586c6e573947347a567163535333366e3253504d65656b45483841544f6952384d6170724471706c4a307863713653707265624f544a4d5139377562454a334b776e4879746a37704e37694557484d69696d436f484973613443754d484b4f51484833545a364654694a6d31783061665536796c444f7257666964397243444f684d305a324c6b75693953716664354b435963703559354978757a64354a755158394136663744435a674e4c73484a7935737a707739724c783077316631637349757a6d696252576244396a537730593143633348385a775734534b646569684f634f4c35323364734b7179625750364b76344a4a56626c4f727069366f575a386432745375684c464e42643173445a6a50745743696e666a4458325058644d57654c596d326f5763516a7951524a566372354d4d58435877765172596b734c59354476455156746d75504830444a4e47624e31524f4d544b4a6b4d675835305a7a56736758794c475057714e78496452725269484c75424f4d6d793550677277727453597045566e304c5642764c5a6732504c7a4e71584c4c67634979637369554a3446497655795a78583547306b365a4e337477786c7961796b4d787463796971596f516fcb3584235d7ecde5f8b7bc2b8f1e9e2e*46c3b75f6e4cf139e92f683f32107271", "123"},
{"$agilekeychain$1*1000*8*7a697868444e7458*1040*773954704874444d4d523043546b44375135544f74675a754532624a45794848305949436e4e724d336c524c39316247426a7843317131614152736d50724c6474586a4d4d445954786c31376d363155437130777a414d36586c7045555457424a5a436a657541456742417961654472745a73576e4b7a7a344d547043567846526655524b4339573631756f3850465a3878306b7176644c4253787071764c58376e716a50674f526d4a4e4b546e3359575175614b304a3964756f756935675a77544f4e6770654855776f79553465786e41364d6376496b7651624762424d62756746796a6753514c37793069783869683773454c533559365946584f545246616d48495730464e634d42466e51367856797a4368517335674a755972434b545944633270764e54775879563542776675386b6e4462506b743138694a756d63447134745361526a32373167366e787375514e346a73574e77796b4b49376d3677653448754c364b5a41514633626e71786130634458544e484a436551386e7679304b786d73346f774a383268665167596b466e39317a307269714434546d4d6173416e344b6a74455a584846526a6659746742504262495958386336755241386c496633417666696d7a5036425745757461736b684574794a5230436d50466d4b536375764674674562315679766a43453077356e614b476d345849395a726b7037626153496b6a66634f355261795157645941487731516f564c6764516d4e3074394b3839526341626f6b6b38324465497068624553646f4177786e6f68347779523338394f4e6561315271635236374d424d695978304b336b4a6966776e74614f4b43483237434b596a6630774e79394a4b7153714a48616b4b364455596a454b31433767786a72303450706d44666373574c5a61324f335852474b756c456b76483349754e3156654f417342324d6f75346d4b78774e43424863566e344c4c6c6c6d4e446b617550415a6f3337764f55484b4156344d4769336267344f4737794c354c5567636a565a6b7369616730383377744d69513431333032305a4a3747794944714d67396a5651444132424e79507a34726d346c333552757a764b6c543073437562534376714f346a5939784a546f683358517348623378716677313231383261685357743236455a6a6b6674365870554642386436574c374430635177347278736a744a6e463530756365684c7779497557366550356936514e704e4863353863437165397163496146794a726555714c623438543235396371416154326c66375276746e3550727453306b7042335961364239586c3359384b464865564e677636537234414e4d6c55583867456376686e43646e6e776a6f656d7152613453725148503462744b4a334565714f6e624a774a65623258552fff2bf0505a0bc88b9cbc9073a74586*a6f6556c971bd3ad40b52751ba025713", ""},
{"$agilekeychain$1*1000*8*7a65613743636950*1040*524a397449393859696b4a576e437763716a574947544a6d306e32474442343355764a7a6948517a45686d7569636631514745347448424e4e6b32564239656a55596f724671547638736d4e66783949504b6f38746b6f49426d4d6b794c7a6d3077327639365a4b515934357774664a477247366b5539486135495863766845714146317458356b725a6a50376f726e55734b3136533756706a4b42516165656a50336e4558616450794f59506f4771347268454730784555485a4f5a4772526a76354f45417470616258375a386436474b366f7653583257335939516d4f5364446a414b674e467a31374f716d73516b3362795776305a414a314f63324d616a6c6472413939443879414c523733794c47467654734d7a6a4734733461674353357a4456527841486233646d446e797448696837377364784344704831784f6a5975666168626b5534796678576c59584d4b3448704a784a4f675a6d7672636b5a4b567071445a345a376648624b55414b7262694972384531336c7a6875725a6f44627571775361774b66417743336230614e4166564954334a6c3477666b4254374f747565394b32667266566d3263416a656c79416c45724b3035504a4e42307a33303632483466664272705765415a4f3552416a36544e5a54415a5976666a4b53675a68493071394a6563426964544a4f564d304a773976394944444339516e564a78587539366974586c4f6132717937354c554b65384b7638585132596832417a5271314e4b5653766d4d50506d3554463762763961554e45695a51436e79504f6e7146617a755231373574455365305446624c636450424a43526a49384b32365967496a734c324e525574526e36714c533065694f536c6c37795a456945476d4a6e327262646942416c485046616e384e4d7869427571777355714e7638305267537752726245696c734d68664b53793836684b39445a716b47546d4b59747176474c6b6a6d52513368796b367a356449706c64385541614236546e426a6b4f64766d33493972763941765a71776345686b734c594a7254446c796f46444b6d557441305a636b414e437245587a63487a30304c50564e4e73694d634d5a6f4f74414534424f53685879374e62545734487a555054774a7056686f6a7453666a664e696d354548345631374c61396862586659666332304e465a5678656a304b4d59586d586547634d67474c6d31794a4b546473474c755a697579625779503259726d6d5248544f6f704b575046556e3438415a48474168396d787136327230367248774e73493439693049794b3765314b4f74547265556c564b6e6d594a5959355a7476334b546f75375a6a676c755a557a39744b54747745583948314a37366e6c6d5a53345079555856696438336876596141617a394438711ee66b990b013609582733309b01df00*444f4656a5ec58e8a75204fb25fd5ae5", "PASSWORD"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt {
unsigned int nkeys;
unsigned int iterations[2];
unsigned int saltlen[2];
unsigned char salt[2][SALTLEN];
unsigned int ctlen[2];
unsigned char ct[2][CTLEN];
} *cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_align(sizeof(*saved_key),
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
cracked = mem_calloc_align(sizeof(*cracked),
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr;
int ctlen;
int saltlen;
char *p;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "*")) == NULL) /* nkeys */
goto err;
if (!isdec(p))
goto err;
if (atoi(p) > 2)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
goto err;
if (!isdec(p))
goto err;
saltlen = atoi(p);
if(saltlen > SALTLEN)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if(strlen(p) != saltlen * 2)
goto err;
if(!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* ct length */
goto err;
if (!isdec(p))
goto err;
ctlen = atoi(p);
if (ctlen > CTLEN)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */
goto err;
if(strlen(p) != ctlen * 2)
goto err;
if(!ishexlc(p))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN; /* skip over "$agilekeychain$" */
p = strtokm(ctcopy, "*");
cs.nkeys = atoi(p);
p = strtokm(NULL, "*");
cs.iterations[0] = atoi(p);
p = strtokm(NULL, "*");
cs.saltlen[0] = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.saltlen[0]; i++)
cs.salt[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.ctlen[0] = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.ctlen[0]; i++)
cs.ct[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int akcdecrypt(unsigned char *derived_key, unsigned char *data)
{
unsigned char out[CTLEN];
int n, key_size;
AES_KEY akey;
unsigned char iv[16];
	/* the next-to-last ciphertext block serves as IV for the final block */
	memcpy(iv, data + CTLEN - 32, 16);
	if (AES_set_decrypt_key(derived_key, 128, &akey) < 0)
		fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
	/* decrypt only the final CBC block; that suffices to test the padding */
	AES_cbc_encrypt(data + CTLEN - 16, out + CTLEN - 16, 16, &akey, iv, AES_DECRYPT);
n = check_pkcs_pad(out, CTLEN, 16);
if (n < 0)
return -1;
key_size = n / 8;
if (key_size != 128 && key_size != 192 && key_size != 256)
// "invalid key size"
return -1;
return 0;
}
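/* Illustrative sketch (hypothetical helper, not part of this format): why
 * checking the last CBC block alone works. CBC decrypts block i as
 * D(C_i) XOR C_{i-1}, so the last two ciphertext blocks are enough to
 * recover the final plaintext block and test its PKCS#7 padding. Assumes the
 * same OpenSSL-compatible AES_* calls used above. */
static int last_block_pad_ok(const unsigned char *key128,
                             const unsigned char *ct, int ctlen)
{
	AES_KEY akey;
	unsigned char iv[16], out[16];
	int pad, i;
	if (ctlen < 32 || (ctlen & 15))
		return 0;
	memcpy(iv, ct + ctlen - 32, 16);  /* C_{n-1} acts as the IV */
	if (AES_set_decrypt_key(key128, 128, &akey) < 0)
		return 0;
	AES_cbc_encrypt(ct + ctlen - 16, out, 16, &akey, iv, AES_DECRYPT);
	pad = out[15];                    /* PKCS#7: last byte is the pad length */
	if (pad < 1 || pad > 16)
		return 0;
	for (i = 16 - pad; i < 16; i++)
		if (out[i] != pad)
			return 0;
	return 1;
}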
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#ifdef SIMD_COEF_32
unsigned char master[MAX_KEYS_PER_CRYPT][32];
int lens[MAX_KEYS_PER_CRYPT], i;
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = strlen(saved_key[i+index]);
pin[i] = (unsigned char*)saved_key[i+index];
pout[i] = master[i];
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt[0], cur_salt->saltlen[0], cur_salt->iterations[0], pout, 16, 0);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
if(akcdecrypt(master[i], cur_salt->ct[0]) == 0)
cracked[i+index] = 1;
else
cracked[i+index] = 0;
}
#else
unsigned char master[32];
pbkdf2_sha1((unsigned char *)saved_key[index],
strlen(saved_key[index]),
cur_salt->salt[0], cur_salt->saltlen[0],
cur_salt->iterations[0], master, 16, 0);
if(akcdecrypt(master, cur_salt->ct[0]) == 0)
cracked[index] = 1;
else
cracked[index] = 0;
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void agile_keychain_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations[0];
}
struct fmt_main fmt_agile_keychain = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
{
"iteration count",
},
{ FORMAT_TAG },
agile_keychain_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
},
fmt_default_salt_hash,
NULL,
set_salt,
agile_keychain_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
frameprocessing.h | #ifndef FRAMEPROCESSING_H
#define FRAMEPROCESSING_H
#include <QDebug>
#include <QList>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <memory>
#include <vector>
#include <numeric>
#include <algorithm>
#include <tuple>
#include "headers/cpp_interface/framerateoptions.h"
#include "headers/cpp_interface/teardata.h"
#include "headers/qml_models/tearoptionsmodel.h"
#include "headers/qml_models/generaloptionsmodel.h"
//! Holds two consecutive frames, calculates the difference frame and applies
//! the framerate, tear and frametime detection algorithms
class FrameProcessing
{
//! constructors
public:
//! default constructor
FrameProcessing()
: _received_first_frames(false)
, _max_video_count(3)
{
_init_member_with_default_recorded_framerates();
}
//! methods
public:
//! returns difference frames for each video (if a previous frame was accessible)
//! fills the members which make the framerate/frametime/tears accessible
//! this should be refactored in the future TODO
QList<cv::Mat> check_for_difference(const QList<cv::Mat> & cv_frame_list
, std::shared_ptr<QList<FramerateOptions>> shared_fps_options_list
, std::shared_ptr<QList<TearOptions>> shared_tear_options_list)
{
// we can only calculate the difference if we have at least two sets of frames
if (!_received_first_frames)
{
_received_first_frames = true;
_cache_framelist(cv_frame_list);
return cv_frame_list;
} else
{
// default init TODO refactor
_tear_rows.clear();
_difference_frames.clear();
for (int i = 0; i < cv_frame_list.size(); ++i)
{
_difference_frames.push_back(cv::Mat());
}
// if multiple videos are loaded, the cache list may not have all frames loaded yet; wait for the next iteration
// refactored this from the loop to allow omp
bool all_cached_frames_filled = true;
for (int i = 0; i < cv_frame_list.size(); ++i)
{
all_cached_frames_filled = all_cached_frames_filled && !_cached_frames[i].empty();
}
// TODO test this for performance
if (all_cached_frames_filled) {
#pragma omp parallel for
for (int i = 0; i < cv_frame_list.size(); ++i)
{
const quint32 pixel_difference = (*shared_fps_options_list)[i].pixel_difference.value();
_difference_frames[i] = _get_difference(_cached_frames[i], cv_frame_list[i], pixel_difference).clone();
// explicit conversion for the linter
const size_t _i = static_cast<size_t>(i);
const size_t _frame_count = _current_framecount_list[_i];
// calculate a diff frame based on the amount of "same" rows in the compared frames
double dismiss_tear_percentage = (*shared_tear_options_list)[i].dismiss_tear_percentage.value() / 100;
// fill the frame difference, the diff counter and tears
_frame_diff_lists[_i][_frame_count] = _get_frame_difference(_difference_frames[i], dismiss_tear_percentage, i, cv_frame_list.size());
}
}
// increments the framecounter for each video and loops automatically
_increment_current_framecount();
}
// save the current frame list
_cache_framelist(cv_frame_list);
return _difference_frames;
}
//! calculates the current framerate for each video
QList<double> get_framerates() const
{
QList<double> framerates;
for (size_t i = 0; i < _frame_diff_lists.size(); ++i)
{
framerates.push_back(_calculate_framerate(i));
}
return framerates;
}
//! calculates the current frametimes for each video for the last frames
const QList<double> get_frametimes()
{
QList<double> frametimes;
for (size_t i = 0; i < _frame_diff_lists.size(); ++i)
{
frametimes.push_back(_calculate_frametime(i));
}
return frametimes;
}
//! returns the tear indices from the last calculation
std::vector<TearData> get_tear_indices() const
{
return _tear_rows;
}
//! resets the state of the object; to ensure that the class is in a well defined state and nobody misuses it,
//! we need to know the recorded framerates (with no videos loaded this list will be empty)
void reset_state(const QList<quint8> recorded_framerate_list)
{
_frame_diff_lists.clear();
_current_framecount_list.clear();
_cached_frames.clear();
_difference_frames.clear();
_init_member(recorded_framerate_list);
}
//! methods
private:
//! sums up the vector with (0's and 1's) to get the resulting framerate
double _calculate_framerate(size_t video_index) const
{
double framecount = std::accumulate(_frame_diff_lists[video_index].begin()
, _frame_diff_lists[video_index].end()
, 0.0);
return framecount;
}
//! frametime for the last visible frame in milliseconds
double _calculate_frametime(size_t video_index) const
{
//! yes, this is the recorded framerate; TODO refactor this into a method with comments
const size_t recorded_framerate = _frame_diff_lists[video_index].size();
size_t current_framerate_count = _current_framecount_list[video_index];
// we always look at the previous frame first
current_framerate_count = _decrement_modulo(current_framerate_count, _frame_diff_lists[video_index].size() - 1);
// if the recorded framerate is zero, there is no video loaded for that index
if (recorded_framerate == 0) { return 0; }
// with fewer than two registered new frames we can't compute a frametime; we need a [1, ..., 1] diff window
if (_calculate_framerate(video_index) < 2.0) { return 0; }
// counter of how long the frame is shown, in units of 1 / recorded_framerate seconds
double frame_time_counter = 0.0;
// start looping backwards for the frame_diff_list until we reach a new frame
bool iterating_over_same_frame = true;
bool found_last_diff = false;
while (iterating_over_same_frame)
{
const double diff = _frame_diff_lists[video_index][current_framerate_count];
if (found_last_diff)
{
if (diff == 0.0) frame_time_counter += 1.0;
if (diff > 0) { frame_time_counter += 1.0; iterating_over_same_frame = false; }
} else {
if (diff > 0) found_last_diff = true;
}
current_framerate_count = _decrement_modulo(current_framerate_count, _frame_diff_lists[video_index].size() - 1);
}
double frametime_in_s = frame_time_counter / static_cast<double>(recorded_framerate);
return frametime_in_s * 1000;
}
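    //! Illustrative sketch (hypothetical helper, not called by the class): the
    //! frametime of the last visible frame is the run length of zeros between
    //! the last two ones in the circular 0/1 diff window, plus one, divided by
    //! the recorded framerate. E.g. a window ... 1 0 0 1 ... at 60 fps yields
    //! 3 / 60 s = 50 ms. The caller must ensure at least two ones exist.
    double _frametime_ms_sketch(const std::vector<double> & diffs
        , size_t last_index
        , size_t recorded_framerate) const
    {
        if (recorded_framerate == 0 || diffs.empty()) return 0.0;
        size_t i = last_index;
        double counter = 0.0;
        bool found_last_diff = false;
        // bounded backwards walk through the circular window
        for (size_t steps = 0; steps < diffs.size(); ++steps)
        {
            const double diff = diffs[i];
            if (found_last_diff)
            {
                counter += 1.0;
                if (diff > 0.0) break; // reached the previous new frame
            }
            else if (diff > 0.0) found_last_diff = true;
            i = _decrement_modulo(i, diffs.size() - 1);
        }
        return counter / static_cast<double>(recorded_framerate) * 1000.0;
    }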
//! wrapper around _init_member, called from the constructor
void _init_member_with_default_recorded_framerates()
{
QList<quint8> _default_recorded_framerates;
for (int i = 0; i < _max_video_count; ++i)
{
_default_recorded_framerates.push_back(0);
}
return _init_member(_default_recorded_framerates);
}
//! the lists of _frame_diff_lists are initialized to be the size of the framerate of each recorded video
//! as we can not recognize a higher framerate than the one it was recorded with
void _init_member(const QList<quint8> recorded_framerate_list)
{
for (int i = 0; i < _max_video_count; ++i)
{
quint8 recorded_framerate = 0;
const bool has_recorded_framerate = i < recorded_framerate_list.size();
if (has_recorded_framerate)
{
recorded_framerate = recorded_framerate_list[i];
}
_frame_diff_lists.push_back(std::vector<double>(recorded_framerate, 0.0));
_cached_frames.push_back(cv::Mat());
_difference_frames.push_back(cv::Mat());
_current_framecount_list.push_back(0);
}
// without a second frame, frames can't be compared
_received_first_frames = false;
}
//! copies the framelist, as cv::Mat is a smart pointer and needs to be deep-copied manually via clone()
void _cache_framelist(const QList<cv::Mat> & _other)
{
for (int i = 0; i < _other.size(); ++i)
{
_cached_frames[i] = _other[i].clone();
}
}
//! increments the framecount for each video and wraps it by the recorded_framerate (see _init_member())
//! mod 0 is invalid, so we have to catch that
void _increment_current_framecount()
{
for (size_t i = 0; i < _current_framecount_list.size(); ++i)
{
quint8 recorded_framerate = static_cast<quint8>(_frame_diff_lists[i].size());
_current_framecount_list[i] += 1;
if (recorded_framerate != 0) _current_framecount_list[i] %= recorded_framerate;
}
}
//! TODO this seems dirty, should be in a util file?
size_t _decrement_modulo(size_t value, size_t max_value) const
{
if (value == 0) return max_value;
else return value - 1;
}
//! calculates the difference between the two frames
//! following the opencv "api" so the methods are interchangeable
cv::Mat _get_difference(const cv::Mat & first_frame, const cv::Mat & second_frame, const quint32 pixel_difference) const
{
cv::Mat difference;
//cv::absdiff(first_frame, second_frame, difference);
_are_equal_with_draw(first_frame, second_frame, static_cast<int>(pixel_difference), difference);
return difference;
}
//! compares the greyscale values of two frames and draws a white pixel for a sufficiently large difference
//! TODO test omp for performance gains
//! take a look at https://stackoverflow.com/questions/18464710/how-to-do-per-element-comparison-and-do-different-operation-according-to-result
void _are_equal_with_draw(const cv::Mat & frame_a, const cv::Mat & frame_b, const int pixel_difference, cv::Mat & output) const
{
cv::Mat black_white_frame_a;
cv::Mat black_white_frame_b;
cv::cvtColor(frame_a, black_white_frame_a, cv::COLOR_BGRA2GRAY);
cv::cvtColor(frame_b, black_white_frame_b, cv::COLOR_BGRA2GRAY);
output = frame_a.clone();
for (int i = 0; i < black_white_frame_a.rows; i += 1) {
for (int j = 0; j < black_white_frame_a.cols; j += 1) {
int ac(std::max(black_white_frame_a.at<uchar>(i, j)
, black_white_frame_b.at<uchar>(i, j)));
int bc(std::min(black_white_frame_a.at<uchar>(i, j)
, black_white_frame_b.at<uchar>(i, j)));
if (ac - bc > pixel_difference) {
// on difference, set to white
output.at<cv::Vec3b>(i,j)[0] = 255;
output.at<cv::Vec3b>(i,j)[1] = 255;
output.at<cv::Vec3b>(i,j)[2] = 255;
} else {
// on "same" pixel, set to black
output.at<cv::Vec3b>(i,j)[0] = 0;
output.at<cv::Vec3b>(i,j)[1] = 0;
output.at<cv::Vec3b>(i,j)[2] = 0;
}
}
}
}
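    //! Illustrative alternative sketch (hypothetical, not called by the class):
    //! the same black/white difference mask built from OpenCV primitives, which
    //! the commented-out cv::absdiff call above hints at. Assumes BGRA input
    //! like _are_equal_with_draw.
    cv::Mat _difference_mask_sketch(const cv::Mat & frame_a, const cv::Mat & frame_b, const int pixel_difference) const
    {
        cv::Mat grey_a, grey_b, diff, mask;
        cv::cvtColor(frame_a, grey_a, cv::COLOR_BGRA2GRAY);
        cv::cvtColor(frame_b, grey_b, cv::COLOR_BGRA2GRAY);
        cv::absdiff(grey_a, grey_b, diff);
        // white where |a - b| > pixel_difference, black elsewhere
        cv::threshold(diff, mask, pixel_difference, 255, cv::THRESH_BINARY);
        // expand back to three channels to match the hand-written version
        cv::cvtColor(mask, mask, cv::COLOR_GRAY2BGR);
        return mask;
    }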
//! returns 0 if the compared frames were identical
//! returns 1 if the frames differ in at least one pixel
//! if we detect a tear, we check how big it is (height) and see if it's above the dismiss_tear_percentage
//! if it is, we return the percentage of the frame which was not a tear (i.e. 1 - tear_percentage)
//! if it is NOT, we dismiss the tear and fall back to 1 (different) or 0 (identical)
double _get_frame_difference(const cv::Mat & difference, const double dismiss_tear_percentage, int video_index, int video_count)
{
// how much of a row has to be different to see it as a "tear cut"
const double tear_row_completness = 0.1;
// normalized row differences
const std::vector<double> row_differences = _get_row_differences(difference);
// if we found a different pixel and no tears, we have to return 1 (as in increment the framerate by 1)
bool found_different_pixel = false;
// whether any tear was detected in this frame
bool tear_found = false;
// saving the indices where tears were found. order: (lower, higher) indices
for (size_t row = 1; row < row_differences.size(); ++row)
{
const size_t index_A = row - 1;
const size_t index_B = row;
const double row_diff_A = row_differences[index_A];
const double row_diff_B = row_differences[index_B];
// save tear indices if we found them
if (_are_tear_rows(row_diff_A, row_diff_B, tear_row_completness))
{
tear_found = true;
_tear_rows.push_back(TearData(index_A, index_B, static_cast<size_t>(video_index), static_cast<size_t>(video_count), difference.rows));
}
// if a difference is found set it, otherwise use the accumulated result
found_different_pixel = row_diff_A > 0.0 || found_different_pixel;
}
// if a tear was found, we return the remaining part of the frame
if (tear_found)
{
const double max_tear_percentage = _get_max_tear_percentage(row_differences, _tear_rows);
// if the tear is not big enough, we dismiss it
if (max_tear_percentage < dismiss_tear_percentage)
{
// if we found a different pixel, we round to a full difference -> 100% -> 1.0; otherwise the frames count as identical
if (found_different_pixel) return 1.0;
else return 0.0;
// if the tear is big enough, we return the remaining frame
} else return 1 - max_tear_percentage;
}
// if no tear was found but a different pixel was found, we "round" to a full difference -> 100% -> 1.0
const bool tear_not_found = !tear_found;
if (found_different_pixel && tear_not_found) return 1.0;
// if no different pixel was found, it has to be a duplicate frame, e.g (0% difference -> 0.0)
return 0.0;
}
//! returns the percentage of the biggest tear found by comparing the height
double _get_max_tear_percentage(const std::vector<double> & row_differences
, const std::vector<TearData> & tear_rows) const
{
double max_tear_percentage = 0.0;
for (size_t i = 0; i < tear_rows.size(); ++i)
{
const std::tuple<size_t, size_t> tear = tear_rows[i].get_indices();
const size_t index_A = std::get<0>(tear);
const size_t index_B = std::get<1>(tear);
if (row_differences[index_A] == 0.0)
{
// go in the direction of index_B + 1
size_t index = index_B;
size_t counter = 0;
while (row_differences[index] > 0.0)
{
counter += 1;
if (index == row_differences.size() - 1) break;
index += 1;
}
const double tear_percentage = static_cast<double>(counter) / static_cast<double>(row_differences.size());
if (tear_percentage > max_tear_percentage) max_tear_percentage = tear_percentage;
}
if (row_differences[index_B] == 0.0)
{
// go in the direction of index_A - 1
size_t index = index_A;
size_t counter = 0;
while (row_differences[index] > 0.0)
{
counter += 1;
if (index == 0) break;
index -= 1;
}
const double tear_percentage = static_cast<double>(counter) / static_cast<double>(row_differences.size());
if (tear_percentage > max_tear_percentage) max_tear_percentage = tear_percentage;
}
}
return max_tear_percentage;
}
//! checks if the two normalized row differences count as a tear
//! tear_row_completness defines the minimum fraction of differing pixels consecutive rows must have to be counted as a tear
bool _are_tear_rows(const double row_diff_A, const double row_diff_B, const double tear_row_completness) const
{
// the tear happened with the old frame in row_diff_A and new frame in row_diff_B
// duplicate row (i.e. black)
if (row_diff_A == 0.0)
{
// new row within bounds
if (row_diff_B >= tear_row_completness)
{
return true;
}
}
// the tear happened with the new frame in row_diff_A and old frame in row_diff_B
// new row within bounds
if (row_diff_A >= tear_row_completness)
{
// duplicate row (i.e. black)
if (row_diff_B == 0.0)
{
return true;
}
}
// otherwise no tear is detected
return false;
}
//! returns, for each row, how many differences were found (normalized to 0.0 ~ 0% - 1.0 ~ 100%)
//! see also _get_row_difference
std::vector<double> _get_row_differences(const cv::Mat & difference) const
{
std::vector<double> row_differences(static_cast<size_t>(difference.rows), 0.0);
#pragma omp parallel for
for (int row = 0; row < difference.rows; ++row)
{
row_differences[static_cast<size_t>(row)] = _get_row_difference(difference, row);
}
return row_differences;
}
//! returns the fraction of the row (0.0 - 1.0) whose pixels are different (i.e. not black)
//! if 3 pixels out of a 10-pixel row were not black, we would return 0.3, i.e. 30%
double _get_row_difference(const cv::Mat & difference, const int row) const
{
double difference_counter = 0;
#pragma omp parallel for reduction(+:difference_counter)
for (int col = 0; col < difference.cols; ++col)
{
// OpenCV stores pixels in BGR order, hence index 0 is blue and index 2 is red
bool blue_channel_is_not_black = difference.at<cv::Vec3b>(row,col)[0] != 0;
bool green_channel_is_not_black = difference.at<cv::Vec3b>(row,col)[1] != 0;
bool red_channel_is_not_black = difference.at<cv::Vec3b>(row,col)[2] != 0;
bool pixel_is_not_black = blue_channel_is_not_black && green_channel_is_not_black && red_channel_is_not_black;
if (pixel_is_not_black) difference_counter += 1.0;
}
return difference_counter / static_cast<double>(difference.cols);
}
//! member
private:
//! holds the current framecount of each video (used to access the inner lists of _frame_diff_lists)
std::vector<size_t> _current_framecount_list;
//! checks if we already received the first frames (only important on startup)
bool _received_first_frames;
//! saves the t-1 frames
QList<cv::Mat> _cached_frames;
//! the list which has a list for each video consisting of 0's or 1's, counting the differing frames
std::vector<std::vector<double>> _frame_diff_lists;
//! maximum video count
const quint8 _max_video_count;
//! if tears were found, they are saved for a frame here
std::vector<TearData> _tear_rows;
//! caches the frame differences which may be rendered if need be
QList<cv::Mat> _difference_frames;
};
#endif // FRAMEPROCESSING_H
|
GB_unop__identity_int16_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_fp64)
// op(A') function: GB (_unop_tran__identity_int16_fp64)
// C type: int16_t
// A type: double
// cast: int16_t cij = GB_cast_to_int16_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int16_fp64)
(
int16_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
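//------------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper, not part of the generated kernel):
// the same bitmap-aware apply loop written without the GB_* macros. A plain C
// cast stands in for GB_cast_to_int16_t, which additionally handles NaN and
// out-of-range values.
//------------------------------------------------------------------------------
static inline void unop_apply_sketch
(
    int16_t *Cx,
    const double *Ax,
    const int8_t *Ab,       // bitmap; may be NULL for the full case
    int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // skip absent bitmap entries
        Cx [p] = (int16_t) Ax [p] ;             // cast double to int16
    }
}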
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int16_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
VolumetricConvolutionMM.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricConvolutionMM.c"
#else
static void inline THNN_(VolumetricConvolutionMM_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *weight,
THTensor *bias,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH) {
THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
"4D or 5D (batch mode) tensor expected for input, but got: %s");
THArgCheck(kT > 0 && kW > 0 && kH > 0, 8,
"kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW);
int ndim = input->nDimension;
int dimf = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (ndim == 5)
{
dimf++;
dimt++;
dimh++;
dimw++;
}
int64_t nInputPlane;
int64_t inputDepth;
int64_t inputHeight;
int64_t inputWidth;
int64_t nOutputPlane;
int64_t outputDepth;
int64_t outputHeight;
int64_t outputWidth;
nInputPlane = input->size[dimf];
inputDepth = input->size[dimt];
inputHeight = input->size[dimh];
inputWidth = input->size[dimw];
nOutputPlane = weight->size[0];
outputDepth = (inputDepth + 2*pT - kT) / dT + 1;
outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
outputWidth = (inputWidth + 2*pW - kW) / dW + 1;
if (outputWidth < 1 || outputHeight < 1 || outputDepth < 1)
{
THError(
"Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small",
nInputPlane, inputDepth, inputHeight, inputWidth,
nOutputPlane, outputDepth, outputHeight, outputWidth
);
}
THArgCheck(weight->nDimension == 2 || weight->nDimension == 5, 4,
"weight tensor should be 2D or 5D - got %d", weight->nDimension);
if (bias != NULL) {
THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
}
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, outputDepth);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
}
}
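/* Illustrative sketch (hypothetical helper, not used below): the standard
   convolution output-size arithmetic applied per axis above, for kernel
   size k, stride d and padding p. */
static inline int64_t THNN_(conv_out_size_sketch)(int64_t in, int k, int d, int p)
{
  return (in + 2*(int64_t)p - k) / d + 1;
}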
static THTensor* THNN_(view_weight)(THTensor *weight)
{
weight = THTensor_(newContiguous)(weight);
if (weight->nDimension == 5) {
int64_t s1 = weight->size[0];
int64_t s2 = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4];
THTensor *old_weight = weight;
weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset,
s1, -1, s2, -1);
THTensor_(free)(old_weight);
}
return weight;
}
/* note: due to write conflicts (overlapping accumulation into the input), this one cannot be parallelized as well as unfolded_copy */
static void THNN_(unfolded_acc_vol)(
THTensor *finput,
THTensor *input,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH,
long nInputPlane,
long inputDepth,
long inputWidth,
long inputHeight,
long outputDepth,
long outputWidth,
long outputHeight)
{
long nip;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
//#pragma omp parallel for private(nip)
for (nip = 0; nip < nInputPlane; nip++)
{
long kt, kw, kh, t, y, x, it, ix, iy;
for (kt = 0; kt < kT; kt++)
{
for (kh = 0; kh < kH; kh++)
{
for (kw = 0; kw < kW; kw++)
{
real *src = finput_data
+ nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth)
+ kt * (kH*kW*outputDepth*outputHeight*outputWidth)
+ kh * (kW*outputDepth*outputHeight*outputWidth)
+ kw * (outputDepth*outputHeight*outputWidth);
real *dst = input_data + nip*(inputDepth*inputHeight*inputWidth);
if (pT > 0 || pH > 0 || pW > 0)
{
for (t = 0; t < outputDepth; t++)
{
it = t*dT - pT + kt;
for (y = 0; y < outputHeight; y++)
{
iy = y*dH - pH + kh;
for (x = 0; x < outputWidth; x++)
{
ix = x*dW - pW + kw;
if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth)
{
/* padded region: nothing to accumulate */
}
else
{
real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix;
THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1);
}
}
}
}
}
else
{
for (t = 0; t < outputDepth; t++)
{
it = t*dT + kt;
for (y = 0; y < outputHeight; y++)
{
iy = y*dH + kh;
for(x = 0; x < outputWidth; x++)
{
ix = x*dW + kw;
real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix;
THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1);
}
}
}
}
}
}
}
}
}
static void THNN_(unfolded_copy_vol)(
THTensor *finput,
THTensor *input,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH,
long nInputPlane,
long inputDepth,
long inputWidth,
long inputHeight,
long outputDepth,
long outputWidth,
long outputHeight)
{
int64_t k;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
// #pragma omp parallel for private(k)
for (k = 0; k < nInputPlane*kT*kH*kW; k++)
{
long nip = k / (kT*kH*kW);
long rest = k % (kT*kH*kW);
long kt = rest / (kH*kW);
rest = rest % (kH*kW);
long kh = rest / kW;
long kw = rest % kW;
long t,x,y,it,ix,iy;
real *dst = finput_data
+ nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth)
+ kt * (kH*kW*outputDepth*outputHeight*outputWidth)
+ kh * (kW*outputDepth*outputHeight*outputWidth)
+ kw * (outputDepth*outputHeight*outputWidth);
real *src = input_data + nip*(inputDepth*inputHeight*inputWidth);
if (pT > 0 || pH > 0 || pW > 0)
{
for (t = 0; t < outputDepth; t++)
{
it = t*dT - pT + kt;
for (y = 0; y < outputHeight; y++)
{
iy = y*dH - pH + kh;
for (x = 0; x < outputWidth; x++)
{
ix = x*dW - pW + kw;
if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth)
memset(dst+t*outputHeight*outputWidth+y*outputWidth+x, 0, sizeof(real)*(1));
else
memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1));
}
}
}
}
else
{
for (t = 0; t < outputDepth; t++)
{
it = t*dT + kt;
for (y = 0; y < outputHeight; y++)
{
iy = y*dH + kh;
for(x = 0; x < outputWidth; x++)
{
ix = x*dW + kw;
memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1));
}
}
}
}
}
}
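/* Illustrative sketch (hypothetical helper, not used by the code): how the
   flat unfold index k decomposes into (plane, kt, kh, kw), mirroring the
   arithmetic at the top of the loop in unfolded_copy_vol. */
static inline void THNN_(unfold_index_sketch)(int64_t k, int kT, int kH, int kW,
                                              long *nip, long *kt, long *kh, long *kw)
{
  *nip = k / ((int64_t)kT*kH*kW);
  int64_t rest = k % ((int64_t)kT*kH*kW);
  *kt = rest / ((int64_t)kH*kW);
  rest = rest % ((int64_t)kH*kW);
  *kh = rest / kW;
  *kw = rest % kW;
}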
static void THNN_(VolumetricConvolutionMM_updateOutput_frame)(
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH,
int64_t nInputPlane,
int64_t inputDepth,
int64_t inputWidth,
int64_t inputHeight,
int64_t nOutputPlane,
int64_t outputDepth,
int64_t outputWidth,
int64_t outputHeight)
{
int64_t i;
THTensor *output2d;
THNN_(unfolded_copy_vol)(
finput, input,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
nInputPlane,
inputDepth, inputWidth, inputHeight,
outputDepth, outputWidth, outputHeight
);
output2d = THTensor_(newWithStorage2d)(
output->storage, output->storageOffset, nOutputPlane, -1,
outputDepth*outputHeight*outputWidth, -1
);
if (bias) {
for (i = 0; i < nOutputPlane; i++)
{
THVector_(fill)(
output->storage->data+output->storageOffset+output->stride[0]*i,
THTensor_(get1d)(bias, i),
outputDepth*outputHeight*outputWidth
);
}
} else {
THTensor_(zero)(output);
}
/* GEMM: output2d += weight * finput; each output plane is the dot product of a weight row with the unfolded input columns */
THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);
THTensor_(free)(output2d);
}
void THNN_(VolumetricConvolutionMM_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int dimf = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
int64_t nInputPlane;
int64_t inputDepth;
int64_t inputHeight;
int64_t inputWidth;
int64_t nOutputPlane;
int64_t outputDepth;
int64_t outputHeight;
int64_t outputWidth;
THNN_(VolumetricConvolutionMM_shapeCheck)(
state, input, NULL, weight, bias,
kT, kW, kH, dT, dW, dH, pT, pW, pH);
input = THTensor_(newContiguous)(input);
if (input->nDimension == 5)
{
dimf++;
dimt++;
dimh++;
dimw++;
}
nInputPlane = input->size[dimf];
inputDepth = input->size[dimt];
inputHeight = input->size[dimh];
inputWidth = input->size[dimw];
nOutputPlane = weight->size[0];
outputDepth = (inputDepth + 2*pT - kT) / dT + 1;
outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
outputWidth = (inputWidth + 2*pW - kW) / dW + 1;
weight = THNN_(view_weight)(weight);
if (input->nDimension == 4)
{
THTensor_(resize2d)(finput, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth);
THNN_(VolumetricConvolutionMM_updateOutput_frame)(
input, output, weight, bias, finput,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
nInputPlane, inputDepth, inputWidth, inputHeight,
nOutputPlane, outputDepth, outputWidth, outputHeight
);
}
else
{
int64_t T = input->size[0];
int64_t t;
THTensor_(resize3d)(finput, T, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
THTensor_(resize5d)(output, T, nOutputPlane, outputDepth, outputHeight, outputWidth);
// #pragma omp parallel for private(t)
for (t = 0; t < T; t++)
{
THTensor *input_t = THTensor_(newSelect)(input, 0, t);
THTensor *output_t = THTensor_(newSelect)(output, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(VolumetricConvolutionMM_updateOutput_frame)(
input_t, output_t, weight, bias, finput_t,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
nInputPlane, inputDepth, inputWidth, inputHeight,
nOutputPlane, outputDepth, outputWidth, outputHeight
);
THTensor_(free)(input_t);
THTensor_(free)(output_t);
THTensor_(free)(finput_t);
}
}
THTensor_(free)(input);
THTensor_(free)(weight);
}
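/*
 * Illustrative sketch (compiled out; hypothetical name): the output extents
 * computed in updateOutput above follow the usual convolution arithmetic,
 * out = (in + 2*pad - kernel)/stride + 1, applied independently to the
 * depth, height and width axes.
 */
#if 0
static int64_t conv_out_extent_sketch(int64_t in, int64_t pad, int64_t kernel, int64_t stride)
{
return (in + 2*pad - kernel) / stride + 1;
}
#endif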
static void THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
THTensor *gradInput,
THTensor *gradOutput,
THTensor *weight,
THTensor *fgradInput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1
);
THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
THTensor_(free)(gradOutput2d);
THTensor_(zero)(gradInput);
THNN_(unfolded_acc_vol)(
fgradInput, gradInput,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
gradInput->size[0], gradInput->size[1], gradInput->size[3], gradInput->size[2],
gradOutput->size[1], gradOutput->size[3], gradOutput->size[2]
);
}
void THNN_(VolumetricConvolutionMM_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *weight,
THTensor *finput,
THTensor *fgradInput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int nOutputPlane = (int)weight->size[0];
THNN_(VolumetricConvolutionMM_shapeCheck)(
state, input, gradOutput, weight, NULL,
kT, kW, kH, dT, dW, dH, pT, pW, pH);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
weight = THNN_(view_weight)(weight);
THTensor_(resizeAs)(gradInput, input);
THTensor_(resizeAs)(fgradInput, finput);
// depending on the BLAS library, fgradInput (result tensor) might
// be left uninitialized on zero alpha, which might lead to weird behavior
// hence, to be safe, zero it
THTensor_(zero)(fgradInput);
THTensor *tweight = THTensor_(new)();
THTensor_(transpose)(tweight, weight, 0, 1);
if (input->nDimension == 4)
{
THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
gradInput, gradOutput, tweight, fgradInput,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH
);
}
else
{
int64_t T = input->size[0];
int64_t t;
//#pragma omp parallel for private(t)
for (t = 0; t < T; t++)
{
THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
gradInput_t, gradOutput_t, tweight, fgradInput_t,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH
);
THTensor_(free)(gradInput_t);
THTensor_(free)(gradOutput_t);
THTensor_(free)(fgradInput_t);
}
}
THTensor_(free)(tweight);
THTensor_(free)(input);
THTensor_(free)(gradOutput);
THTensor_(free)(weight);
}
static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)(
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
real scale)
{
int64_t i;
THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1
);
THTensor *tfinput = THTensor_(new)();
THTensor_(transpose)(tfinput, finput, 0, 1);
THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
THTensor_(free)(tfinput);
if (gradBias) {
for (i = 0; i < gradBias->size[0]; i++)
{
int64_t k;
real sum = 0;
real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0];
for (k = 0; k < gradOutput2d->size[1]; k++)
sum += data[k];
(gradBias->storage->data + gradBias->storageOffset)[i] += scale * sum;
}
}
THTensor_(free)(gradOutput2d);
}
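/*
 * Illustrative sketch (compiled out; hypothetical names): the bias-gradient
 * accumulation in accGradParameters_frame above is a scaled row sum of
 * gradOutput2d, one row per output plane.
 */
#if 0
static void grad_bias_sketch(double *gradBias, const double *gradOutput2d,
long nOutputPlane, long spatialSize, double scale)
{
long i, k;
for (i = 0; i < nOutputPlane; i++) {
double sum = 0.0;
for (k = 0; k < spatialSize; k++)
sum += gradOutput2d[i*spatialSize + k];
gradBias[i] += scale * sum;
}
}
#endif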
void THNN_(VolumetricConvolutionMM_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
int kT, int kW, int kH,
int dT, int dW, int dH,
int pT, int pW, int pH,
accreal scale_)
{
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int nOutputPlane = (int)gradWeight->size[0];
THNN_(VolumetricConvolutionMM_shapeCheck)(
state, input, gradOutput, gradWeight, gradBias,
kT, kW, kH, dT, dW, dH, pT, pW, pH);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
gradWeight = THNN_(view_weight)(gradWeight);
if (input->nDimension == 4) // non-batch mode
{
THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
}
else // batch mode
{
int64_t T = input->size[0];
int64_t t;
for (t = 0; t < T; t++)
{
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);
THTensor_(free)(gradOutput_t);
THTensor_(free)(finput_t);
}
}
THTensor_(free)(input);
THTensor_(free)(gradOutput);
THTensor_(free)(gradWeight);
}
#endif
|
mozilla_ng_fmt_plug.c | /*
* Cracker for Mozilla's key3.db's master password.
*
* All the real logic here is borrowed from Milen Rangelov's Hashkill project
* and from Deque's article.
*
* Thanks to Jim Fougeron for all the help!
*
* This software is Copyright (c) 2014, Sanju Kholia <sanju.kholia [at]
* gmail.com> and Dhiru Kholia <dhiru [at] openwall.com>, and it is hereby
* released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mozilla;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mozilla);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif
#include <stdint.h>
#include <openssl/des.h>
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include "sha.h"
#define FORMAT_LABEL "Mozilla"
#define FORMAT_NAME "Mozilla key3.db"
#define FORMAT_TAG "$mozilla$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "SHA1 3DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$mozilla$*3*20*1*5199adfab24e85e3f308bacf692115f23dcd4f8f*11*2a864886f70d010c050103*16*9debdebd4596b278de029b2b2285ce2e*20*2c4d938ccb3f7f1551262185ccee947deae3b8ae", "12345678"},
{"$mozilla$*3*20*1*4f184f0d3c91cf52ee9190e65389b4d4c8fc66f2*11*2a864886f70d010c050103*16*590d1771368107d6be64844780707787*20*b8458c712ffcc2ff938409804cf3805e4bb7d722", "openwall"},
{"$mozilla$*3*20*1*897f35ff10348f0d3a7739dbf0abddc62e2e64c3*11*2a864886f70d010c050103*16*1851b917997b3119f82b8841a764db62*20*197958dd5e114281f59f9026ad8b7cfe3de7196a", "password"},
{NULL}
};
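/*
 * Layout note for the test vectors above (matching valid() and get_salt()
 * below): $mozilla$*version*local_salt_length*nnLen*local_salt*oidDatalen*
 * oidData*password_check_length*password_check*global_salt_length*global_salt,
 * with all binary fields hex-encoded and only version 3 accepted.
 */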
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
SHA_CTX pctx;
int global_salt_length;
unsigned char global_salt[20];
int local_salt_length; // entry-salt (ES)
unsigned char local_salt[20];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_len);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *keepptr;
int res;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
return 0;
keepptr=strdup(ciphertext);
p = &keepptr[TAG_LENGTH];
if (*p != '*')
goto err;
++p;
if ((p = strtokm(p, "*")) == NULL) /* version */
goto err;
if (!isdec(p))
goto err;
res = atoi(p);
if (res != 3) /* we only know about this particular version */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* local_salt_length */
goto err;
if (!isdec(p))
goto err;
res = atoi(p);
if (res > 20)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* nnLen (we ignore nnlen) */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* local_salt */
goto err;
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* oidDatalen */
goto err;
if (!isdec(p))
goto err;
res = atoi(p);
if (res > 20)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* oidData */
goto err;
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* password_check_length */
goto err;
if (!isdec(p))
goto err;
res = atoi(p);
if (res > 20)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* password_check */
goto err;
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* global_salt_length */
goto err;
if (!isdec(p))
goto err;
res = atoi(p);
if (res > 20)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* global_salt */
goto err;
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
MEM_FREE(keepptr);
return 1;
err:
MEM_FREE(keepptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
int i;
static struct custom_salt cs;
char *p, *q;
memset(&cs, 0, SALT_SIZE); // cs.local_salt needs to be zero padded to length 20
p = ciphertext + TAG_LENGTH;
q = strchr(p, '*'); // version
p = q + 1;
q = strchr(p, '*'); // local_salt_length
p = q + 1;
cs.local_salt_length = atoi(p);
q = strchr(p, '*'); // nnLen
p = q + 1;
q = strchr(p, '*'); // local_salt
p = q + 1;
for (i = 0; i < cs.local_salt_length; i++)
cs.local_salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
atoi16[ARCH_INDEX(p[2 * i + 1])];
q = strchr(p, '*'); // oidLen (unused)
p = q + 1;
q = strchr(p, '*'); // oidData (unused)
p = q + 1;
q = strchr(p, '*'); // password_check_length
p = q + 1;
// Not stored in salt. This is the binary length
q = strchr(p, '*'); // password_check
p = q + 1;
// Not stored in salt, this is the binary.
q = strchr(p, '*'); // global_salt_length
p = q + 1;
cs.global_salt_length = atoi(p);
q = strchr(p, '*'); // global_salt
p = q + 1;
for (i = 0; i < cs.global_salt_length; i++)
cs.global_salt[i] = atoi16[ARCH_INDEX(p[i * 2])]
* 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
// Calculate partial sha1 data for password hashing
SHA1_Init(&cs.pctx);
SHA1_Update(&cs.pctx, cs.global_salt, cs.global_salt_length);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p, *q;
int i;
p = ciphertext + TAG_LENGTH;
q = strchr(p, '*'); // version
p = q + 1;
q = strchr(p, '*'); // local_salt_length
p = q + 1;
q = strchr(p, '*'); // nnLen
p = q + 1;
q = strchr(p, '*'); // local_salt
p = q + 1;
q = strchr(p, '*'); // oidLen (unused)
p = q + 1;
q = strchr(p, '*'); // oidData (unused)
p = q + 1;
q = strchr(p, '*'); // password_check_length
p = q + 1;
q = strchr(p, '*'); // password_check
p = q + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
// http://www.drh-consultancy.demon.co.uk/key3.html
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
SHA_CTX ctx, ctxi, ctxo;
int i;
union {
unsigned char uc[64];
uint32_t ui[64/4];
} pad;
unsigned char buffer[20];
unsigned char tk[20];
unsigned char key[40];
DES_cblock ivec;
DES_key_schedule ks1, ks2, ks3;
// HP = SHA1(global-salt||password)
// Copy already calculated partial hash data
memcpy(&ctx, &cur_salt->pctx, sizeof(SHA_CTX));
SHA1_Update(&ctx, saved_key[index], saved_len[index]);
SHA1_Final(buffer, &ctx);
// CHP = SHA1(HP||entry-salt) // entry-salt (ES) is local_salt
SHA1_Init(&ctx);
SHA1_Update(&ctx, buffer, 20);
SHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);
SHA1_Final(buffer, &ctx);
// Step 0 for all hmac, store off the first half (the key is the same for all 3)
// this will avoid having to setup the ipad/opad 2 times, and also avoids 4 SHA calls
// reducing the hmac calls from 12 SHA limbs, down to 8 and ipad/opad loads from 3
// down to 1. It adds 4 CTX memcpy's, but that is a very fair trade off.
SHA1_Init(&ctxi);
SHA1_Init(&ctxo);
memset(pad.uc, 0x36, 64);
for (i = 0; i < 20; ++i)
pad.uc[i] ^= buffer[i];
SHA1_Update(&ctxi, pad.uc, 64);
for (i = 0; i < 64/4; ++i)
pad.ui[i] ^= 0x36363636^0x5c5c5c5c;
SHA1_Update(&ctxo, pad.uc, 64);
// k1 = HMAC(PES||ES) // use CHP as the key, PES is ES which is zero padded to length 20
// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key
memcpy(&ctx, &ctxi, sizeof(ctx));
SHA1_Update(&ctx, cur_salt->local_salt, 20);
SHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);
SHA1_Final(buffer, &ctx);
memcpy(&ctx, &ctxo, sizeof(ctx));
SHA1_Update(&ctx, buffer, 20);
SHA1_Final(key, &ctx);
// tk = HMAC(PES) // use CHP as the key
// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key
memcpy(&ctx, &ctxi, sizeof(ctx));
SHA1_Update(&ctx, cur_salt->local_salt, 20);
SHA1_Final(buffer, &ctx);
memcpy(&ctx, &ctxo, sizeof(ctx));
SHA1_Update(&ctx, buffer, 20);
SHA1_Final(tk, &ctx);
// k2 = HMAC(tk||ES) // use CHP as the key
// NOTE, ctxi and ctxo are no longer needed after this hmac, so we simply use them
SHA1_Update(&ctxi, tk, 20);
SHA1_Update(&ctxi, cur_salt->local_salt, cur_salt->local_salt_length);
SHA1_Final(buffer, &ctxi);
SHA1_Update(&ctxo, buffer, 20);
SHA1_Final(key+20, &ctxo);
// k = k1||k2 // encrypt "password-check" string using this key
DES_set_key((DES_cblock *) key, &ks1);
DES_set_key((DES_cblock *) (key+8), &ks2);
DES_set_key((DES_cblock *) (key+16), &ks3);
memcpy(ivec, key + 32, 8); // last 8 bytes!
// PKCS#5 padding (standard block padding)
DES_ede3_cbc_encrypt((unsigned char*)"password-check\x02\x02", (unsigned char*)crypt_out[index], 16, &ks1, &ks2, &ks3, &ivec, DES_ENCRYPT);
}
return count;
}
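/*
 * Illustrative sketch (compiled out; hypothetical names): the ipad/opad reuse
 * performed in crypt_all above. When several HMAC-SHA1 computations share one
 * key, the two fixed key blocks (key XOR 0x36.., key XOR 0x5c..) can be
 * hashed once into ctxi/ctxo and then memcpy'd per message, saving two
 * 64-byte SHA1 compressions for every HMAC after the first.
 */
#if 0
static void hmac_sha1_precomputed_sketch(const SHA_CTX *ctxi_pre,
const SHA_CTX *ctxo_pre, const unsigned char *msg, int len,
unsigned char out[20])
{
SHA_CTX ctx;
unsigned char inner[20];
memcpy(&ctx, ctxi_pre, sizeof(ctx)); /* state after the ipad block */
SHA1_Update(&ctx, msg, len);
SHA1_Final(inner, &ctx);
memcpy(&ctx, ctxo_pre, sizeof(ctx)); /* state after the opad block */
SHA1_Update(&ctx, inner, 20);
SHA1_Final(out, &ctx);
}
#endif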
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (((uint32_t*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void mozilla_set_key(char *key, int index)
{
saved_len[index] = strlen(key);
strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_mozilla = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
mozilla_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
fallingexp.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
static PyObject *fallingexp(PyObject *self, PyObject *args, PyObject *keywds);
static PyObject *fallingexp(PyObject *self, PyObject *args, PyObject *keywds)
{
PyObject *etc;
PyArrayObject *x,*y,*rampparams;
double goal,m,x0;
int i;
npy_intp dims[1];
// etc = PyList_New(0);
static char *kwlist[] = {"rampparams","x","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O" \
,kwlist,&rampparams,&x,&etc))
{
return NULL;
}
goal = IND(rampparams,0);
m = IND(rampparams,1);
x0 = IND(rampparams,2);
/*
goal = PyFloat_AsDouble(PyList_GetItem(rampparams,0));
m = PyFloat_AsDouble(PyList_GetItem(rampparams,1));
x0 = PyFloat_AsDouble(PyList_GetItem(rampparams,2));
*/
dims[0] = x->dimensions[0];
y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
#pragma omp parallel for
for(i=0;i<dims[0];i++)
{
IND(y,i) = goal*(1+exp(-1*m*(IND(x,i)-x0)));
}
return PyArray_Return(y);
}
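/*
 * Illustrative sketch (compiled out; hypothetical name): the model evaluated
 * by fallingexp above, stripped of the Python/numpy plumbing. y decays toward
 * `goal` from above as x grows, with rate m and offset x0.
 */
#if 0
static void fallingexp_sketch(double *y, const double *x, long n,
double goal, double m, double x0)
{
long i;
for (i = 0; i < n; i++)
y[i] = goal*(1.0 + exp(-m*(x[i] - x0)));
}
#endif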
static char module_docstring[] ="\
This function creates a model that fits a ramp using a falling exponential.\n\
\n\
Parameters\n\
----------\n\
goal: goal as x -> inf\n\
m: fall exp\n\
x0: time offset\n\
x: Array of time/phase points\n\
\n\
Returns\n\
-------\n\
This function returns an array of y values computed from a falling exponential\n\
\n\
Revisions\n\
---------\n\
2008-06-16 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2010-12-24 Nate Lust, UCF\n\
natelust at linux dot com\n\
Updated to C\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";
static PyMethodDef module_methods[] = {
{"fallingexp",(PyCFunction)fallingexp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_fallingexp(void)
#else
initfallingexp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"fallingexp", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("fallingexp", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
|
engine.h | #pragma once
#ifndef ENGINE_H
#define ENGINE_H
// Node API includes
#include <napi.h>
// STL includes
#include <memory>
#include <unordered_map>
#include <any>
// Eigen includes
#include <Eigen/Core>
// Optimization lib includes
#include <libs/optimization_lib/include/core/core.h>
#include <libs/optimization_lib/include/core/utils.h>
#include <libs/optimization_lib/include/data_providers/mesh_wrapper.h>
#include <libs/optimization_lib/include/data_providers/empty_data_provider.h>
#include <libs/optimization_lib/include/data_providers/plain_data_provider.h>
#include <libs/optimization_lib/include/data_providers/edge_pair_data_provider.h>
#include <libs/optimization_lib/include/objective_functions/summation_objective.h>
#include <libs/optimization_lib/include/objective_functions/position/face_position_objective.h>
#include <libs/optimization_lib/include/objective_functions/separation_objective.h>
#include <libs/optimization_lib/include/objective_functions/symmetric_dirichlet_objective.h>
#include <libs/optimization_lib/include/objective_functions/seamless_objective.h>
#include <libs/optimization_lib/include/objective_functions/singularity/singular_points_position_objective.h>
#include <libs/optimization_lib/include/objective_functions/region_localization_objective.h>
#include <libs/optimization_lib/include/iterative_methods/newton_method.h>
#include <libs/optimization_lib/include/iterative_methods/projected_gradient_descent.h>
#include <libs/optimization_lib/include/solvers/eigen_sparse_solver.h>
#include <libs/optimization_lib/include/solvers/pardiso_solver.h>
class Engine : public Napi::ObjectWrap<Engine> {
public:
static Napi::Object Init(Napi::Env env, Napi::Object exports);
Engine(const Napi::CallbackInfo& info);
private:
/**
* Private type definitions
*/
enum class ModelFileType
{
OBJ,
OFF,
UNKNOWN
};
enum class BufferedPrimitiveType : uint32_t
{
VERTEX = 0,
EDGE,
TRIANGLE
};
enum class DataSource
{
DOMAIN_DATA,
IMAGE_DATA
};
enum class FacesSource
{
DOMAIN_FACES,
IMAGE_FACES
};
enum class EdgesSource
{
DOMAIN_EDGES,
IMAGE_EDGES
};
enum class VerticesSource
{
DOMAIN_VERTICES,
IMAGE_VERTICES
};
enum class AlgorithmType
{
AUTOCUTS,
AUTOQUADS
};
static Napi::FunctionReference constructor;
/**
* NAPI private instance setters
*/
void SetPositionWeight(const Napi::CallbackInfo& info, const Napi::Value& value);
void SetSeamlessWeight(const Napi::CallbackInfo& info, const Napi::Value& value);
void SetLambda(const Napi::CallbackInfo& info, const Napi::Value& value);
void SetDelta(const Napi::CallbackInfo& info, const Napi::Value& value);
/**
* NAPI private instance getters
*/
Napi::Value GetPositionWeight(const Napi::CallbackInfo& info);
Napi::Value GetSeamlessWeight(const Napi::CallbackInfo& info);
//Napi::Value GetLambda(const Napi::CallbackInfo& info);
Napi::Value GetDelta(const Napi::CallbackInfo& info);
Napi::Value GetObjectiveFunctionsData(const Napi::CallbackInfo& info);
/**
* NAPI private instance methods
*/
Napi::Value GetDomainFacesCount(const Napi::CallbackInfo& info);
Napi::Value GetImageFacesCount(const Napi::CallbackInfo& info);
Napi::Value GetDomainEdgesCount(const Napi::CallbackInfo& info);
Napi::Value GetImageEdgesCount(const Napi::CallbackInfo& info);
Napi::Value GetDomainVerticesCount(const Napi::CallbackInfo& info);
Napi::Value GetImageVerticesCount(const Napi::CallbackInfo& info);
Napi::Value GetDomainFaces(const Napi::CallbackInfo& info);
Napi::Value GetImageFaces(const Napi::CallbackInfo& info);
Napi::Value GetDomainEdges(const Napi::CallbackInfo& info);
Napi::Value GetImageEdges(const Napi::CallbackInfo& info);
Napi::Value GetDomainVertices(const Napi::CallbackInfo& info);
Napi::Value GetImageVertices(const Napi::CallbackInfo& info);
Napi::Value GetDomainBufferedFaces(const Napi::CallbackInfo& info);
Napi::Value GetImageBufferedFaces(const Napi::CallbackInfo& info);
Napi::Value GetDomainBufferedEdges(const Napi::CallbackInfo& info);
Napi::Value GetImageBufferedEdges(const Napi::CallbackInfo& info);
Napi::Value GetDomainBufferedVertices(const Napi::CallbackInfo& info);
Napi::Value GetImageBufferedVertices(const Napi::CallbackInfo& info);
Napi::Value GetDomainBufferedUvs(const Napi::CallbackInfo& info);
Napi::Value GetImageBufferedUvs(const Napi::CallbackInfo& info);
Napi::Value GetDomainFaceEdgeAdjacency(const Napi::CallbackInfo& info);
Napi::Value GetDomainEdgeFaceAdjacency(const Napi::CallbackInfo& info);
Napi::Value GetImageFaceEdgeAdjacency(const Napi::CallbackInfo& info);
Napi::Value GetImageEdgeFaceAdjacency(const Napi::CallbackInfo& info);
Napi::Value GetObjectiveFunctionProperty(const Napi::CallbackInfo& info);
Napi::Value SetObjectiveFunctionProperty(const Napi::CallbackInfo& info);
Napi::Value LoadShape(const Napi::CallbackInfo& info);
Napi::Value LoadPartial(const Napi::CallbackInfo& info);
Napi::Value ResumeSolver(const Napi::CallbackInfo& info);
Napi::Value PauseSolver(const Napi::CallbackInfo& info);
Napi::Value SetAlgorithmType(const Napi::CallbackInfo& info);
Napi::Value ConstrainFacePosition(const Napi::CallbackInfo& info);
Napi::Value UpdateConstrainedFacePosition(const Napi::CallbackInfo& info);
Napi::Value UnconstrainFacePosition(const Napi::CallbackInfo& info);
Napi::Value ReconstrainFacePosition(const Napi::CallbackInfo& info);
Napi::Value GetShapeBufferedVertices(const Napi::CallbackInfo& info);
Napi::Value GetPartialBufferedVertices(const Napi::CallbackInfo& info);
Napi::Value GetShapeBufferedFaces(const Napi::CallbackInfo& info);
Napi::Value GetPartialBufferedFaces(const Napi::CallbackInfo& info);
Napi::Value GetV(const Napi::CallbackInfo& info);
Napi::Value GetTau(const Napi::CallbackInfo& info);
Napi::Value GetLambda(const Napi::CallbackInfo& info);
Napi::Value GetMu(const Napi::CallbackInfo& info);
Napi::Value GetValue(const Napi::CallbackInfo& info);
Napi::Value GetIteration(const Napi::CallbackInfo& info);
Napi::Value GetLineSearchIteration(const Napi::CallbackInfo& info);
Napi::Value GetStepSize(const Napi::CallbackInfo& info);
Napi::Value SetInitialStepSize(const Napi::CallbackInfo& info);
/**
* Regular private instance methods
*/
ModelFileType GetModelFileType(std::string filename);
void TryUpdateImageVertices();
Napi::Int32Array GetBufferedFaces(const Napi::CallbackInfo& info, const FacesSource faces_source) const;
Napi::Int32Array GetBufferedEdges(const Napi::CallbackInfo& info, const EdgesSource edges_source) const;
Napi::Float32Array GetBufferedVertices(const Napi::CallbackInfo& info, const VerticesSource vertices_source);
Napi::Int32Array CreateBufferedFacesArray(Napi::Env env, const Eigen::MatrixXi& F) const;
Napi::Int32Array CreateBufferedEdgesArray(Napi::Env env, const Eigen::MatrixXi& E) const;
Napi::Array CreateFaces(Napi::Env env, const Eigen::MatrixX3i& F);
Napi::Array CreateEdges(Napi::Env env, const Eigen::MatrixX2i& E);
Napi::Value NativeToJS(Napi::Env env, const std::any& property_value);
Napi::Value NativeToJS(Napi::Env env, const Eigen::VectorXd& property_value);
Napi::Value NativeToJS(Napi::Env env, const std::vector<RDS::VertexIndex>& property_value);
Napi::Value NativeToJS(Napi::Env env, const double property_value);
Napi::Value NativeToJS(Napi::Env env, const std::string& property_value);
std::any JSToNative(Napi::Env env, const Napi::Value& value);
Napi::Value GetFaceEdgeAdjacency(const Napi::CallbackInfo& info, const DataSource data_source);
Napi::Value GetEdgeFaceAdjacency(const Napi::CallbackInfo& info, const DataSource data_source);
AlgorithmType StringToAlgorithmType(const std::string& algorithm_type_string);
Napi::Value CreateObjectiveFunctionDataObject(Napi::Env env, std::shared_ptr<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>> objective_function) const;
std::shared_ptr<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>> GetObjectiveFunctionByName(const std::string& name);
void InitializeSolver();
/**
* Regular private templated instance methods
*/
template <typename Derived>
Napi::Array CreateVerticesArray(Napi::Env env, const Eigen::MatrixBase<Derived>& V)
{
Napi::Array vertices_array = Napi::Array::New(env, V.rows());
auto entries_per_vertex = V.cols();
for (int32_t vertex_index = 0; vertex_index < V.rows(); vertex_index++)
{
Napi::Object vertex_object = Napi::Object::New(env);
float x = V(vertex_index, 0);
float y = V(vertex_index, 1);
vertex_object.Set("x", x);
vertex_object.Set("y", y);
if (entries_per_vertex == 3)
{
float z = V(vertex_index, 2);
vertex_object.Set("z", z);
}
vertices_array[vertex_index] = vertex_object;
}
return vertices_array;
}
template <typename Derived>
Napi::Float32Array CreateBufferedVerticesArray(Napi::Env env, const Eigen::MatrixBase<Derived>& V, const Eigen::MatrixX3i& F)
{
const uint32_t entries_per_face = 9;
const uint32_t entries_per_vertex = V.cols();
Napi::Float32Array buffered_vertices_array = Napi::Float32Array::New(env, entries_per_face * F.rows());
//#pragma omp parallel for
for (int32_t face_index = 0; face_index < F.rows(); face_index++)
{
const int base_index = entries_per_face * face_index;
for (uint32_t i = 0; i < 3; i++)
{
uint32_t vertex_index = F(face_index, i);
const float x = V(vertex_index, 0);
const float y = V(vertex_index, 1);
const float z = entries_per_vertex == 2 ? 0 : V(vertex_index, 2);
const int base_vertex_index = base_index + 3 * i;
buffered_vertices_array[base_vertex_index] = x;
buffered_vertices_array[base_vertex_index + 1] = y;
buffered_vertices_array[base_vertex_index + 2] = z;
}
}
return buffered_vertices_array;
}
template <typename Derived>
Napi::Float32Array CreateBufferedVerticesArray(Napi::Env env, const Eigen::MatrixBase<Derived>& V, const Eigen::MatrixX2i& E)
{
const uint32_t entries_per_edge = 6;
const uint32_t entries_per_vertex = V.cols();
Napi::Float32Array buffered_vertices_array = Napi::Float32Array::New(env, entries_per_edge * E.rows());
//#pragma omp parallel for
for (int32_t edge_index = 0; edge_index < E.rows(); edge_index++)
{
const int base_index = entries_per_edge * edge_index;
for (uint32_t i = 0; i < 2; i++)
{
uint32_t vertex_index = E(edge_index, i);
const float x = V(vertex_index, 0);
const float y = V(vertex_index, 1);
const float z = entries_per_vertex == 2 ? 0 : V(vertex_index, 2);
const int base_vertex_index = base_index + 3 * i;
buffered_vertices_array[base_vertex_index] = x;
buffered_vertices_array[base_vertex_index + 1] = y;
buffered_vertices_array[base_vertex_index + 2] = z;
}
}
return buffered_vertices_array;
}
template <typename Derived>
Napi::Float32Array CreateBufferedVerticesArray(Napi::Env env, const Eigen::MatrixBase<Derived>& V)
{
Napi::Float32Array buffered_vertices_array = Napi::Float32Array::New(env, 3 * V.rows());
const uint32_t entries_per_vertex = V.cols();
//#pragma omp parallel for
for (int32_t vertex_index = 0; vertex_index < V.rows(); vertex_index++)
{
const float x = V(vertex_index, 0);
const float y = V(vertex_index, 1);
const float z = entries_per_vertex == 2 ? 0 : V(vertex_index, 2);
const int base_index = 3 * vertex_index;
buffered_vertices_array[base_index] = x;
buffered_vertices_array[base_index + 1] = y;
buffered_vertices_array[base_index + 2] = z;
}
return buffered_vertices_array;
}
template <typename Derived>
Napi::Float32Array CreateBufferedUvsArray(Napi::Env env, const Eigen::MatrixBase<Derived>& V, const Eigen::MatrixXi& F)
{
const uint32_t entries_per_face = 6;
uint32_t entries_per_vertex = V.cols();
Napi::Float32Array buffered_uvs_array = Napi::Float32Array::New(env, entries_per_face * F.rows());
//#pragma omp parallel for
for (int32_t face_index = 0; face_index < F.rows(); face_index++)
{
const int base_index = entries_per_face * face_index;
for (uint32_t i = 0; i < 3; i++)
{
uint32_t vertex_index = F(face_index, i);
float x = V(vertex_index, 0);
float y = V(vertex_index, 1);
const int base_vertex_index = base_index + 2 * i;
buffered_uvs_array[base_vertex_index] = x;
buffered_uvs_array[base_vertex_index + 1] = y;
}
}
return buffered_uvs_array;
}
/**
* Fields
*/
std::unordered_map<RDS::Face, std::shared_ptr<FacePositionObjective<Eigen::StorageOptions::RowMajor>>, RDS::VectorHash, RDS::VectorEquals> face_to_position_objective_map_;
std::unordered_map<RDS::Face, std::shared_ptr<FaceDataProvider>, RDS::VectorHash, RDS::VectorEquals> face_to_face_data_provider_map_;
std::shared_ptr<MeshWrapper> mesh_wrapper_;
std::shared_ptr<MeshWrapper> mesh_wrapper_shape_;
std::shared_ptr<MeshWrapper> mesh_wrapper_partial_;
std::shared_ptr<PlainDataProvider> plain_data_provider_;
std::shared_ptr<EmptyDataProvider> empty_data_provider_;
std::vector<std::shared_ptr<EdgePairDataProvider>> edge_pair_data_providers_;
std::vector<std::shared_ptr<FaceFanDataProvider>> face_fan_data_providers_;
std::vector<std::shared_ptr<FaceDataProvider>> face_data_providers_;
std::vector<std::shared_ptr<SummationObjective<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>, Eigen::VectorXd>>> summation_objectives_;
std::shared_ptr<SummationObjective<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>, Eigen::VectorXd>> summation_objective_;
std::shared_ptr<SummationObjective<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>, Eigen::VectorXd>> autoquads_summation_objective_;
std::shared_ptr<SummationObjective<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>, Eigen::VectorXd>> autocuts_summation_objective_;
std::shared_ptr<SummationObjective<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>, Eigen::VectorXd>> position_;
std::shared_ptr<Separation<Eigen::StorageOptions::RowMajor>> separation_;
std::shared_ptr<SymmetricDirichlet<Eigen::StorageOptions::RowMajor>> symmetric_dirichlet_;
std::shared_ptr<SeamlessObjective<Eigen::StorageOptions::RowMajor>> seamless_;
std::shared_ptr<SingularPointsPositionObjective<Eigen::StorageOptions::RowMajor>> singular_points_;
std::vector<std::shared_ptr<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>>> objective_functions_;
std::vector<std::shared_ptr<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>>> autocuts_objective_functions_;
std::vector<std::shared_ptr<ObjectiveFunction<Eigen::StorageOptions::RowMajor, Eigen::VectorXd>>> autoquads_objective_functions_;
std::unique_ptr<NewtonMethod<PardisoSolver, Eigen::StorageOptions::RowMajor>> newton_method_;
std::unique_ptr<ProjectedGradientDescent<Eigen::StorageOptions::RowMajor>> projected_gradient_descent_;
std::vector<Eigen::DenseIndex> constrained_faces_indices;
Eigen::MatrixX2d image_vertices_;
std::unordered_map<std::string, uint32_t> properties_map_;
std::unordered_map<std::string, uint32_t> property_modifiers_map_;
std::shared_ptr<RegionLocalizationObjective<Eigen::StorageOptions::RowMajor>> region_localization_;
bool shape_ready_;
bool partial_ready_;
};
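/*
 * Illustrative sketch (compiled out; hypothetical names): the memory layout
 * produced by the CreateBufferedVerticesArray overloads above. Every face
 * contributes 9 consecutive floats (x,y,z per corner), so corner i of face f
 * starts at offset 9*f + 3*i; 2-D meshes are padded with z = 0.
 */
#if 0
static void buffer_faces_sketch(float *dst, const float (*V)[3],
const int (*F)[3], int faceCount)
{
for (int f = 0; f < faceCount; f++) {
for (int i = 0; i < 3; i++) {
const float *v = V[F[f][i]];
float *out = dst + 9*f + 3*i;
out[0] = v[0];
out[1] = v[1];
out[2] = v[2];
}
}
}
#endif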
#endif |
data.c | // SPDX-License-Identifier: BSD-2-Clause
/*
Copyright 1999-2016 Bernard Parent
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <src/data.h>
#include <model/_model.h>
#include <cycle/_cycle.h>
#define dt_steady 1.0e99
#define DATATYPE_BINARY 1
#define DATATYPE_ASCII 2
#ifdef _3DL
#define SUBZONE_DESIRED_WIDTH 40
#else
#define SUBZONE_DESIRED_WIDTH 40
#endif
#define MIN_NUMSUBZONE_PER_THREAD 3
void find_NODEVALID_on_domain_all(np_t *np, gl_t *gl, int TYPELEVEL, bool *NODEVALID){
long i,j,k;
#ifdef DISTMPI
int rank,thisrank;
int THISNODEVALID;
#endif
for_ijk(gl->domain_lim_all,is,js,ks,ie,je,ke){
NODEVALID[_ai_all(gl,i,j,k)]=FALSE;
}
#ifdef DISTMPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD);
thisrank=_node_rank(gl, i, j, k);
if (thisrank==rank) THISNODEVALID=(int)(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL));
MPI_Bcast(&THISNODEVALID,1,MPI_INT,thisrank,MPI_COMM_WORLD);
assert(THISNODEVALID==TRUE || THISNODEVALID==FALSE);
NODEVALID[_ai_all(gl,i,j,k)]=(bool)THISNODEVALID;
}
MPI_Barrier(MPI_COMM_WORLD);
#else
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
NODEVALID[_ai_all(gl,i,j,k)]=is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL);
}
#endif
}
void read_data_file_binary_ascii(char *filename, np_t *np, gl_t *gl, long level, int DATATYPE){
FILE *datafile;
char data_format_str[100];
long i,j,k,flux,cnt,tmp1,tmp2,tmp3;
double CFLmem;
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL
flux_t Res;
bool NORES=FALSE;
long NOREScount=0;
#endif
#ifndef UNSTEADY
double tmp_double;
#endif
bool FORMAT010;
bool *NODEVALID;
flux_t U;
#ifdef EMFIELD
double Lcmem;
fluxemfield_t Uemfield;
#endif
#ifdef DISTMPI
int rank,numproc;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
MPI_Barrier(MPI_COMM_WORLD);
#endif
NODEVALID=(bool *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
*(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
*(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
*sizeof(bool));
CFLmem=gl->CFL;
#ifdef EMFIELD
Lcmem=gl->Lc;
#endif
datafile = fopen(filename, "r");
if (datafile==NULL)
fatal_error("Having problems opening datafile %s.",filename);
for (cnt=0; cnt<16; cnt++) {
if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1) fatal_error("Problem with fscanf in read_data_file_binary().");
}
data_format_str[16]=EOS;
wfprintf(stdout,"Reading data file %s ",filename);
if (level!=0) wfprintf(stdout,"to time level minus %ld ",level);
FORMAT010=FALSE;
switch (DATATYPE){
case DATATYPE_BINARY:
if (strcmp("WARPBINFORMAT010",data_format_str)==0) {
wfprintf(stdout,"in CFDWARP binary format 010..");
FORMAT010=TRUE;
}
break;
case DATATYPE_ASCII:
if (strcmp("WARPASCFORMAT010",data_format_str)==0) {
wfprintf(stdout,"in CFDWARP ASCII format 010..");
FORMAT010=TRUE;
}
break;
}
if (FORMAT010) {
if (level==0) {
if (fscanf(datafile," windowis=%ld windowie=%ld iter=%ld effiter_U=%lg effiter_R=%lg CFL=%lg",
&(gl->window.is),&(gl->window.ie),
&(gl->iter),&(gl->effiter_U),&(gl->effiter_R),&(gl->CFL))!=6) fatal_error("Problem with fscanf in read_data_file_binary().");
if (fscanf(datafile," nd=%ld ns=%ld nf=%ld",
&tmp1,&tmp2,
&tmp3)!=3) fatal_error("Problem with fscanf in read_data_file_binary().");
if (tmp1!=nd) fatal_error("Data file has %ld dimensions but CFDWARP is compiled with %ld dimensions.",tmp1,nd);
if (tmp2!=ns) fatal_error("Data file has %ld species but CFDWARP is compiled with %ld species.",tmp2,ns);
if (tmp3!=nf) fatal_error("Data file has %ld fluxes but CFDWARP is compiled with %ld fluxes.",tmp3,nf);
if (fscanf(datafile," is=%ld ie=%ld",
&tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary().");
if ((tmp2-tmp1)!=(gl->domain_all.ie-gl->domain_all.is)) fatal_error("Data file has %ld grid lines along i but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.ie-gl->domain_all.is));
#ifdef _2DL
if (fscanf(datafile," js=%ld je=%ld",
&tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary().");
if ((tmp2-tmp1)!=(gl->domain_all.je-gl->domain_all.js)) fatal_error("Data file has %ld grid lines along j but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.je-gl->domain_all.js));
#endif
#ifdef _3DL
if (fscanf(datafile," ks=%ld ke=%ld",
&tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary().");
if ((tmp2-tmp1)!=(gl->domain_all.ke-gl->domain_all.js)) fatal_error("Data file has %ld grid lines along k but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.ke-gl->domain_all.ks));
#endif
#if defined(UNSTEADY)
if (fscanf(datafile," time=%lg",&(gl->time))!=1) fatal_error("Problem reading time variable within fscanf in read_data_file_binary_ascii().");
#else
if (fscanf(datafile," time=%lg",&tmp_double)!=1) fatal_error("Problem reading time variable within fscanf in read_data_file_binary_ascii().");
#endif
#ifdef UNSTEADY
if (fscanf(datafile," dt=%lg",&(gl->dt))!=1) fatal_error("Problem reading dt variable within fscanf in read_data_file_binary_ascii().");
#else
if (fscanf(datafile," dt=%lg",&tmp_double)!=1) fatal_error("Problem reading dt variable within fscanf in read_data_file_binary_ascii().");
#endif
#ifdef EMFIELD
if (fscanf(datafile," Lc=%lg effiter_U_emfield=%lg effiter_R_emfield=%lg",&(gl->Lc),&(gl->effiter_U_emfield),&(gl->effiter_R_emfield))!=3) fatal_error("Problem reading EMFIELD variables within fscanf in read_data_file_binary_ascii().");
#endif
if (fscanf(datafile,"%*[^\n]")!=0) fatal_error("Problem with fscanf in read_data_file_binary_ascii().");
} else {
if (fscanf(datafile," %*[^\n]")!=0) fatal_error("Problem with fscanf in read_data_file_binary_ascii().");
}
fgetc(datafile);
}
if (!FORMAT010){
fatal_error("Data file format invalid.");
}
wfprintf(stdout,"fluid.");
find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID);
wfprintf(stdout,".");
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
#ifdef DISTMPI
if (rank==0) {
#endif
if (NODEVALID[_ai_all(gl,i,j,k)]) {
switch (DATATYPE){
case DATATYPE_BINARY:
if (fread(U, sizeof(flux_t), 1, datafile)!=1)
fatal_error("Could not read all data properly.");
break;
case DATATYPE_ASCII:
for (flux=0; flux<nf; flux++){
if (fscanf(datafile,"%lg%*[^\n]",&(U[flux]))!=1)
fatal_error("Could not read all data properly.");
}
break;
default:
fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
}
}
#ifdef DISTMPI
}
MPI_Bcast_Node(&U, nf, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl);
if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD);
#endif
if (is_node_in_zone(i,j,k,gl->domain_lim)) {
for (flux=0; flux<nf; flux++){
if (level==0) np[_ai(gl,i,j,k)].bs->U[flux]=U[flux];
#ifdef UNSTEADY
if (level==1) np[_ai(gl,i,j,k)].bs->Um1[flux]=U[flux];
#if _RESTIME_BW > 2
if (level==2) np[_ai(gl,i,j,k)].bs->Um2[flux]=U[flux];
#endif
#if _RESTIME_BW > 3
if (level==3) np[_ai(gl,i,j,k)].bs->Um3[flux]=U[flux];
#endif
#endif
}
np[_ai(gl,i,j,k)].INIT_FLUID=TRUE;
}
}
#ifdef EMFIELD
wfprintf(stdout,"emfield.");
find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_EMFIELD, NODEVALID);
wfprintf(stdout,".");
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
#ifdef DISTMPI
if (rank==0) {
#endif
if (NODEVALID[_ai_all(gl,i,j,k)]) {
switch (DATATYPE){
case DATATYPE_BINARY:
if (fread(Uemfield, sizeof(fluxemfield_t), 1, datafile)!=1)
fatal_error("Could not read all data properly.");
break;
case DATATYPE_ASCII:
for (flux=0; flux<nfe; flux++){
if (fscanf(datafile,"%lg%*[^\n]",&(Uemfield[flux]))!=1)
fatal_error("Could not read all data properly.");
}
break;
default:
fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
}
}
#ifdef DISTMPI
}
MPI_Bcast_Node(&Uemfield, nfe, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl);
if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD);
#endif
if (is_node_in_zone(i,j,k,gl->domain_lim)) {
for (flux=0; flux<nfe; flux++) {
if (level==0) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Uemfield[flux];
#ifdef UNSTEADY
if (level==1) np[_ai(gl,i,j,k)].bs->Uemfieldm1[flux]=Uemfield[flux];
#endif
}
np[_ai(gl,i,j,k)].INIT_EMFIELD=TRUE;
}
}
#endif
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL
wfprintf(stdout,"trap.");
find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID);
wfprintf(stdout,".");
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
#ifdef DISTMPI
if (rank==0) {
#endif
if (NODEVALID[_ai_all(gl,i,j,k)]) {
switch (DATATYPE){
case DATATYPE_BINARY:
if (fread(Res, sizeof(flux_t), 1, datafile)!=1){
NORES=TRUE;
NOREScount++;
}
break;
case DATATYPE_ASCII:
for (flux=0; flux<nf; flux++){
if (fscanf(datafile,"%lg%*[^\n]",&(Res[flux]))!=1){
NORES=TRUE;
NOREScount++;
}
}
break;
default:
fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
}
}
#ifdef DISTMPI
}
MPI_Bcast(&NORES, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD);
if (!NORES) MPI_Bcast_Node(&Res, nf, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl);
if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD);
#endif
if (is_node_in_zone(i,j,k,gl->domain_lim)) {
for (flux=0; flux<nf; flux++){
if (!NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=Res[flux];
else if (NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=0.0;
}
}
NORES=FALSE;
}
if (NOREScount>0) wfprintf(stdout,"WARNING: The residual at the previous time step could not be found within the data file %s. The residual has been set to zero..",filename);
#endif
fclose(datafile);
wfprintf(stdout,"done;\n");
if (level!=0) gl->CFL=CFLmem;
#ifdef EMFIELD
if (level!=0) gl->Lc=Lcmem;
#endif
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
if (rank!=0) {
gl->effiter_U=0.0;
gl->effiter_R=0.0;
#ifdef EMFIELD
gl->effiter_U_emfield=0.0;
gl->effiter_R_emfield=0.0;
#endif
}
#endif
free(NODEVALID);
}
void write_data_file_binary_ascii(char *filename, np_t *np, gl_t *gl, int DATATYPE){
FILE *datafile;
long i,j,k;
flux_t *fluxtmp;
bool *NODEVALID;
#ifdef EMFIELD
double effiter_U_emfield,effiter_R_emfield;
#endif
long flux;
double effiter_U,effiter_R;
#ifdef DISTMPI
zone_t domain;
flux_t U;
int rank,proc,numproc;
MPI_Status MPI_Status1;
#endif
#ifdef EMFIELD
assert(nf>=nfe);
#endif
fluxtmp=(flux_t *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
*(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
*(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
*sizeof(flux_t));
NODEVALID=(bool *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
*(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
*(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
*sizeof(bool));
effiter_U=gl->effiter_U;
effiter_R=gl->effiter_R;
#ifdef EMFIELD
effiter_U_emfield=gl->effiter_U_emfield;
effiter_R_emfield=gl->effiter_R_emfield;
#endif
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
MPI_Allreduce(&gl->effiter_U, &effiter_U, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&gl->effiter_R, &effiter_R, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#ifdef EMFIELD
MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#endif
#endif
datafile = wfopen(filename, "w");
wfprintf(stdout,"Writing to CFDWARP ");
switch (DATATYPE){
case DATATYPE_BINARY:
wfprintf(stdout,"binary");
wfprintf(datafile,"WARPBINFORMAT010");
break;
case DATATYPE_ASCII:
wfprintf(stdout,"ASCII");
wfprintf(datafile,"WARPASCFORMAT010");
break;
default:
fatal_error("DATATYPE must be either DATATYPE_BINARY or DATATYPE_ASCII.");
}
wfprintf(stdout," data file %s..",filename);
wfprintf(datafile," windowis=%ld windowie=%ld iter=%ld effiter_U=%E effiter_R=%E CFL=%E",
gl->window.is,gl->window.ie,gl->iter,effiter_U,effiter_R,gl->CFL);
wfprintf(datafile," nd=%ld ns=%ld nf=%ld",nd,ns,nf);
wfprintf(datafile," is=%ld ie=%ld",gl->domain_all.is,gl->domain_all.ie);
#ifdef _2DL
wfprintf(datafile," js=%ld je=%ld",gl->domain_all.js,gl->domain_all.je);
#endif
#ifdef _3DL
wfprintf(datafile," ks=%ld ke=%ld",gl->domain_all.ks,gl->domain_all.ke);
#endif
#if defined(UNSTEADY)
wfprintf(datafile," time=%E",gl->time);
#else
wfprintf(datafile," time=%E",0.0);
#endif
#ifdef UNSTEADY
wfprintf(datafile," dt=%E",gl->dt);
#else
wfprintf(datafile," dt=%E",dt_steady);
#endif
#ifdef EMFIELD
wfprintf(datafile," Lc=%E effiter_U_emfield=%E effiter_R_emfield=%E",gl->Lc,effiter_U_emfield,effiter_R_emfield);
#endif
wfprintf(datafile,"\n");
find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID);
#ifdef DISTMPI
for (proc=0; proc<numproc; proc++){
domain=_domain_from_rank(proc,gl);
for_ijk(domain,is,js,ks,ie,je,ke){
if (proc==rank){
for (flux=0; flux<nf; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->U[flux];
if (proc!=0) {
MPI_Send(U,nf,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
}
}
if (rank==0 && proc!=0) {
MPI_Recv(U,nf,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
}
for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
}
MPI_Barrier(MPI_COMM_WORLD);
}
#else
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->U[flux];
}
#endif
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
if (NODEVALID[_ai_all(gl,i,j,k)]) {
switch (DATATYPE){
case DATATYPE_BINARY:
wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(flux_t), 1, datafile);
break;
case DATATYPE_ASCII:
for (flux=0; flux<nf; flux++)
wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
break;
default:
fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
}
}
}
#ifdef EMFIELD
find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_EMFIELD, NODEVALID);
#ifdef DISTMPI
for (proc=0; proc<numproc; proc++){
domain=_domain_from_rank(proc,gl);
for_ijk(domain,is,js,ks,ie,je,ke){
if (proc==rank){
for (flux=0; flux<nfe; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
if (proc!=0) {
MPI_Send(U,nfe,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
}
}
if (rank==0 && proc!=0) {
MPI_Recv(U,nfe,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
}
for (flux=0; flux<nfe; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
}
MPI_Barrier(MPI_COMM_WORLD);
}
#else
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
for (flux=0; flux<nfe; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
}
#endif
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
if (NODEVALID[_ai_all(gl,i,j,k)]) {
switch (DATATYPE){
case DATATYPE_BINARY:
wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(fluxemfield_t), 1, datafile);
break;
case DATATYPE_ASCII:
for (flux=0; flux<nfe; flux++)
wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
break;
default:
fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
}
}
}
#endif
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL
find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID);
#ifdef DISTMPI
for (proc=0; proc<numproc; proc++){
domain=_domain_from_rank(proc,gl);
for_ijk(domain,is,js,ks,ie,je,ke){
if (proc==rank){
for (flux=0; flux<nf; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux];
if (proc!=0) {
MPI_Send(U,nf,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
}
}
if (rank==0 && proc!=0) {
MPI_Recv(U,nf,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
}
for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
}
MPI_Barrier(MPI_COMM_WORLD);
}
#else
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux];
}
#endif
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
if (NODEVALID[_ai_all(gl,i,j,k)]) {
switch (DATATYPE){
case DATATYPE_BINARY:
wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(flux_t), 1, datafile);
break;
case DATATYPE_ASCII:
for (flux=0; flux<nf; flux++)
wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
break;
default:
fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
}
}
}
#endif
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
wfclose(datafile);
wfprintf(stdout,"done.\n");
free(fluxtmp);
free(NODEVALID);
}
void write_data_file(np_t *np, gl_t *gl){
char *tmp_filename,*tmp_filename2;
#ifdef DISTMPI
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
tmp_filename=(char *)malloc((strlen(gl->output_filename)+10)*sizeof(char));
tmp_filename2=(char *)malloc((strlen(gl->output_filename)+10)*sizeof(char));
strcpy(tmp_filename,gl->output_filename);
SOAP_strins(".wbak",&tmp_filename,strlen(tmp_filename));
strcpy(tmp_filename2,gl->output_filename);
SOAP_strins(".wbak2",&tmp_filename2,strlen(tmp_filename2));
#ifdef DISTMPI
if (rank==0) {
#endif
rename(tmp_filename,tmp_filename2);
rename(gl->output_filename,tmp_filename);
#ifdef DISTMPI
}
#endif
if (gl->OUTPUTASCII) {
write_data_file_binary_ascii(gl->output_filename, np, gl, DATATYPE_ASCII);
} else {
if (gl->OUTPUTINTERPOLATION){
write_data_file_interpolation(gl->output_filename, np, gl);
} else {
write_data_file_binary_ascii(gl->output_filename, np, gl, DATATYPE_BINARY);
}
}
free(tmp_filename);
free(tmp_filename2);
}
void read_data_file(input_t input, np_t *np, gl_t *gl){
#ifdef UNSTEADY
long i,j,k;
long flux;
#endif
long spec;
gl->nsinit=ns;
for (spec=0; spec<ns; spec++) gl->initspecies[spec]=spec;
if (input.READDATAFILE) {
if (input.ASCII) {
read_data_file_binary_ascii(input.name, np, gl, 0, DATATYPE_ASCII);
} else {
if (input.INTERPOLATION){
read_data_file_interpolation(input.name, np, gl);
} else {
read_data_file_binary_ascii(input.name, np, gl, 0, DATATYPE_BINARY);
}
}
gl->iter=max(gl->iter,1);
gl->INIT_FLUID_READ=TRUE;
gl->INIT_EMFIELD_READ=TRUE;
}
#ifdef UNSTEADY
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
for (flux=0; flux<nf; flux++){
(np)[_ai(gl,i,j,k)].bs->Um1[flux]=(np)[_ai(gl,i,j,k)].bs->U[flux];
}
#ifdef EMFIELD
for (flux=0; flux<nfe; flux++){
(np)[_ai(gl,i,j,k)].bs->Uemfieldm1[flux]=(np)[_ai(gl,i,j,k)].bs->Uemfield[flux];
}
#endif
}
if (input.M1) read_data_file_binary_ascii(input.name_m1, np, gl, 1, DATATYPE_BINARY);
#if _RESTIME_BW > 2
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
for (flux=0; flux<nf; flux++){
(np)[_ai(gl,i,j,k)].bs->Um2[flux]=(np)[_ai(gl,i,j,k)].bs->Um1[flux];
}
}
if (input.M2) read_data_file_binary_ascii(input.name_m2, np, gl, 2, DATATYPE_BINARY);
#endif
#if _RESTIME_BW > 3
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
for (flux=0; flux<nf; flux++){
(np)[_ai(gl,i,j,k)].bs->Um3[flux]=(np)[_ai(gl,i,j,k)].bs->Um2[flux];
}
}
if (input.M3) read_data_file_binary_ascii(input.name_m3, np, gl, 3, DATATYPE_BINARY);
#endif
#endif
}
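/*
 * Illustrative sketch (compiled out; hypothetical names): the time-level
 * seeding pattern used in read_data_file above. Each older level is first
 * copied from the level just above it (U -> Um1, Um1 -> Um2, Um2 -> Um3) so
 * the restart stays well-posed even when the optional m1/m2/m3 data files are
 * absent; when they are supplied they simply overwrite the seeds.
 */
#if 0
static void seed_history_levels_sketch(double **levels, int nlevels, long nflux)
{
int lvl;
long flux;
/* levels[0] is U, levels[1] is Um1, ...; seed in increasing age order */
for (lvl = 1; lvl < nlevels; lvl++)
for (flux = 0; flux < nflux; flux++)
levels[lvl][flux] = levels[lvl-1][flux];
}
#endif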
void find_interpolation_weight(np_t *np, gl_t *gl, long l, dim_t x_file, dim_t dx1_file,
#ifdef _2DL
dim_t dx2_file,
#endif
#ifdef _3DL
dim_t dx3_file,
#endif
double radiusmax2, double *thisweight){
double distance;
EXM_mat_t mat1,mat2,mat3,mat1inv;
long dim;
distance=0.0;
for (dim=0; dim<nd; dim++) distance+=sqr(_x(np[l],dim)-x_file[dim]);
*thisweight=0.0;
if (distance<radiusmax2) {
EXM_init_matrix(&mat1, nd, nd);
for (dim=0; dim<nd; dim++){
mat1.cont[EXM_aim(mat1.glm,dim,0)]=dx1_file[dim];
#ifdef _2DL
mat1.cont[EXM_aim(mat1.glm,dim,1)]=dx2_file[dim];
#endif
#ifdef _3DL
mat1.cont[EXM_aim(mat1.glm,dim,2)]=dx3_file[dim];
#endif
}
EXM_init_matrix(&mat1inv, nd, nd);
EXM_invert_matrix_analytical(mat1, &mat1inv);
EXM_init_matrix(&mat2, nd, 1);
for (dim=0; dim<nd; dim++){
mat2.cont[EXM_aim(mat2.glm,dim,0)]=_x(np[l],dim)-x_file[dim];
}
EXM_init_matrix(&mat3, nd, 1);
EXM_multiply_matrices(mat1inv, mat2, &mat3);
*thisweight=0.0;
//for (dim=0; dim<nd; dim++) thisweight=max(thisweight,fabs(mat3.cont[EXM_aim(mat3.glm,dim,0)]));
for (dim=0; dim<nd; dim++) *thisweight+=fabs(pow(fabs(mat3.cont[EXM_aim(mat3.glm,dim,0)]),3.0));
*thisweight=fabs(pow(*thisweight,1.0/3.0));
*thisweight=max(1e-16,max(0.0003-(*thisweight)*0.00001,1.0-(*thisweight)));
EXM_free_matrix(&mat1);
EXM_free_matrix(&mat1inv);
EXM_free_matrix(&mat2);
EXM_free_matrix(&mat3);
}
}
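/*
 * Illustrative sketch (compiled out; hypothetical name): the weight formula
 * at the end of find_interpolation_weight above. With c the local coordinates
 * of the target node in the donor cell (mat3), the distance measure is the
 * 3-norm r = (sum_dim |c_dim|^3)^(1/3) and the weight decays linearly with r
 * down to a small positive floor.
 */
#if 0
static double interpolation_weight_sketch(const double *c, long numdim)
{
long dim;
double r = 0.0, w;
for (dim = 0; dim < numdim; dim++)
r += pow(fabs(c[dim]), 3.0);
r = pow(r, 1.0/3.0);
w = 1.0 - r;
if (0.0003 - r*0.00001 > w) w = 0.0003 - r*0.00001;
if (w < 1e-16) w = 1e-16;
return w;
}
#endif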
bool is_interpolation_occurring_in_zone(np_t *np, gl_t *gl, int TYPELEVEL, dim_t x_file, double radiusmax2, zone_t zone, long *i, long *j, long *k){
long dim;
double distance;
bool FOUND;
FOUND=FALSE;
// fprintf(stderr,"zone.is=%ld zone.ie=%ld\n",zone.is,zone.ie);
//if (zone.ie>gl->domain_all.ie) fatal_error("problem here..");
//if (zone.is<gl->domain_all.is) fatal_error("problem here..");
*i=zone.is-1;
do {
(*i)++;
*j=zone.js-1;
do {
(*j)++;
#ifdef _3DL
*k=zone.ks-1;
do {
(*k)++;
#endif
if (is_node_valid(np[_ai(gl,*i,*j,*k)],TYPELEVEL)){
distance=0.0;
for (dim=0; dim<nd; dim++) distance+=sqr(_x(np[_ai(gl,*i,*j,*k)],dim)-x_file[dim]);
if (distance<radiusmax2) {
//if (zone.is==zone.ie) fprintf(stderr,"[[%E %E %ld,%ld,%ld %E %E]]\n",distance,radiusmax2,*i,*j,*k,_x(np[_ai(gl,*i,*j,*k)],0),x_file[0]);
FOUND=TRUE;
}
}
#ifdef _3DL
} while(!FOUND && *k<zone.ke);
#endif
} while(!FOUND && *j<zone.je);
} while(!FOUND && *i<zone.ie);
return(FOUND);
}
bool find_interpolation_zone(np_t *np, gl_t *gl, int TYPELEVEL, dim_t x_file, double radiusmax2, zone_t *zone){
long i,j,k,offset,imem;
bool FOUNDWITHIN,FOUNDLEFT,FOUNDRIGHT,FOUND;
zone_t istationzone,domain_eff;
domain_eff=_zone_intersection(gl->domain_all,gl->domain_lim);
FOUNDWITHIN=FALSE;
FOUNDLEFT=FALSE;
FOUNDRIGHT=FALSE;
istationzone=domain_eff;
if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,*zone,&i,&j,&k)){
istationzone.is=i;
istationzone.ie=i;
FOUNDWITHIN=TRUE;
} else {
offset=0;
do {
offset++;
if (zone->ie+offset<=domain_eff.ie) {
istationzone.ie=zone->ie+offset;
istationzone.is=zone->ie+offset;
if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k)){
FOUNDLEFT=TRUE;
}
}
if (zone->is-offset>=domain_eff.is && !FOUNDLEFT) {
istationzone.ie=zone->is-offset;
istationzone.is=zone->is-offset;
if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k)){
FOUNDRIGHT=TRUE;
}
}
} while (!FOUNDLEFT && !FOUNDRIGHT && offset<=(domain_eff.ie-domain_eff.is+1) );
}
if (FOUNDRIGHT || FOUNDLEFT || FOUNDWITHIN){
imem=istationzone.is;
if (FOUNDRIGHT){
zone->ie=imem;
} else {
FOUND=TRUE;
while (istationzone.ie<domain_eff.ie && FOUND) {
istationzone.ie++;
istationzone.is++;
FOUND=is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k);
}
if (!FOUND) zone->ie=istationzone.ie-1; else zone->ie=domain_eff.ie;
}
if (FOUNDLEFT){
zone->is=imem;
} else {
istationzone.is=imem;
istationzone.ie=imem;
FOUND=TRUE;
while (istationzone.is>domain_eff.is && FOUND) {
istationzone.is--;
istationzone.ie--;
FOUND=is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k);
}
if (!FOUND) zone->is=istationzone.is+1; else zone->is=domain_eff.is;
}
}
return(FOUNDRIGHT || FOUNDLEFT || FOUNDWITHIN);
}
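/* Quick rejection test: a data point can only contribute to a subzone if
it lies inside the subzone bounding box [xmin,xmax] enlarged by the
interpolation radius. */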
bool is_data_point_in_domain(dim_t x_file, dim_t xmin, dim_t xmax, double radiusmax2){
bool INDOMAIN;
long dim;
INDOMAIN=TRUE;
for (dim=0; dim<nd; dim++) {
if (INDOMAIN) {
if (x_file[dim]<xmin[dim] && sqr(x_file[dim]-xmin[dim])>radiusmax2) INDOMAIN=FALSE;
}
}
for (dim=0; dim<nd; dim++) {
if (INDOMAIN) {
if (x_file[dim]>xmax[dim] && sqr(x_file[dim]-xmax[dim])>radiusmax2) INDOMAIN=FALSE;
}
}
return(INDOMAIN);
}
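/* Read a WARPINTFORMAT001 interpolation data file and initialize the fluid
(and, when compiled in, emfield) node properties: each data point in the
file spreads a weighted contribution to the nearby valid nodes, and the
accumulated properties are then normalized by the accumulated weights. */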
void read_data_file_interpolation(char *filename, np_t *np, gl_t *gl){
FILE *datafile;
char data_format_str[100];
long i,j,k,l_file,cnt,dim,cntzone;
long numsubzone, numflux_read,numspec_read,numdim_read,numnodes;
double tmp_dt,tmp_time;
double *weight,*radiusmax2_file,thisweight;
zone_t *subzone;
dim_t *xmin,*xmax;
initvar_t *initvar;
initvar_t *initvar_file;
zone_t zone;
long numsubzone_desired;
#ifdef EMFIELD
initvar_emfield_t *initvar_emfield;
initvar_emfield_t *initvar_emfield_file;
#endif
bool FORMAT001;
dim_t *dx1_file,*x_file;
#ifdef _2DL
dim_t *dx2_file;
#endif
#ifdef _3DL
dim_t *dx3_file;
#endif
int cnterror;
#ifdef OPENMPTHREADS
omp_lock_t *nodelock;
#endif
#ifdef DISTMPI
int rank,numproc;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
MPI_Barrier(MPI_COMM_WORLD);
#endif
weight=(double *)malloc(sizeof(double)*(gl->domain_lim.ie+4)
#ifdef _2DL
*(gl->domain_lim.je+4)
#endif
#ifdef _3DL
*(gl->domain_lim.ke+4)
#endif
);
#ifdef OPENMPTHREADS
nodelock=(omp_lock_t *)malloc(sizeof(omp_lock_t)*(gl->domain_lim.ie+4)
#ifdef _2DL
*(gl->domain_lim.je+4)
#endif
#ifdef _3DL
*(gl->domain_lim.ke+4)
#endif
);
#endif
datafile = fopen(filename, "r");
if (datafile==NULL)
fatal_error("Having problems opening interpolation datafile %s.",filename);
/* first do the fluid properties */
initvar=(initvar_t *)malloc(sizeof(initvar_t)*(gl->domain_lim.ie+4)
#ifdef _2DL
*(gl->domain_lim.je+4)
#endif
#ifdef _3DL
*(gl->domain_lim.ke+4)
#endif
);
for (cnt=0; cnt<16; cnt++){
if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1) {
fatal_error("Problem with fscanf in read_data_file_interpolation().");
}
}
data_format_str[16]=EOS;
wfprintf(stdout,"Reading interpolation data file %s ",filename);
FORMAT001=FALSE;
if (strcmp("WARPINTFORMAT001",data_format_str)==0) {
wfprintf(stdout,"in CFDWARP format 001..");
FORMAT001=TRUE;
}
if (FORMAT001) {
if (fscanf(datafile," numnodes=%ld nf=%ld nd=%ld ns=%ld windowis=%ld windowie=%ld iter=%ld effiter_U=%lg effiter_R=%lg CFL=%lg time=%lg dt=%lg%*[^\n]",
&numnodes,&numflux_read,&numdim_read,&numspec_read,&(gl->window.is),&(gl->window.ie),
&(gl->iter),&(gl->effiter_U),&(gl->effiter_R),&(gl->CFL),&(tmp_time),&tmp_dt)!=12) fatal_error("Problem reading interpolation data file.");
#ifdef UNSTEADY
gl->time=tmp_time;
gl->dt=tmp_dt;
#endif
fgetc(datafile);
if (numdim_read!=nd) fatal_error("Number of dimensions read (%ld) does not equal current number of dimensions (%ld).",numdim_read,nd);
if (numspec_read!=ns) fatal_error("Number of species read (%ld) does not equal current number of species (%ld).",numspec_read,ns);
if (numflux_read!=nf) fatal_error("Number of fluxes read (%ld) does not equal current number of fluxes (%ld).",numflux_read,nf);
} else {
fatal_error("Interpolation file format unknown.");
}
/* read data and store in ram */
initvar_file=(initvar_t *)malloc(numnodes*sizeof(initvar_t));
x_file=(dim_t *)malloc(numnodes*sizeof(dim_t));
dx1_file=(dim_t *)malloc(numnodes*sizeof(dim_t));
#ifdef _2DL
dx2_file=(dim_t *)malloc(numnodes*sizeof(dim_t));
#endif
#ifdef _3DL
dx3_file=(dim_t *)malloc(numnodes*sizeof(dim_t));
#endif
radiusmax2_file=(double *)malloc(numnodes*sizeof(double));
for (l_file=0; l_file<numnodes; l_file++){
cnterror=0;
if (fread(initvar_file[l_file], sizeof(initvar_t), 1, datafile)!=1) cnterror++;
if (fread(x_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++;
if (fread(dx1_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++;
#ifdef _2DL
if (fread(dx2_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++;
#endif
#ifdef _3DL
if (fread(dx3_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++;
#endif
if (cnterror>0) fatal_error("Could not read all data properly.");
radiusmax2_file[l_file]=0.0e0;
for (dim=0; dim<nd; dim++)
radiusmax2_file[l_file]+=sqr(fabs(dx1_file[l_file][dim])
#ifdef _2DL
+fabs(dx2_file[l_file][dim])
#endif
#ifdef _3DL
+fabs(dx3_file[l_file][dim])
#endif
);
radiusmax2_file[l_file]*=1.1;
}
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
weight[_ai(gl,i,j,k)]=0.0e0;
#ifdef OPENMPTHREADS
omp_init_lock(&(nodelock[_ai(gl,i,j,k)]));
#endif
for (cnt=0; cnt<numinitvar; cnt++) (initvar[_ai(gl,i,j,k)])[cnt]=0.0;
}
zone=_zone_intersection(gl->domain_all,gl->domain_lim);
subzone=(zone_t *)malloc(sizeof(zone_t));
find_subzones_in_zone_given_zonelength(SUBZONE_DESIRED_WIDTH, zone, &numsubzone, &subzone);
#ifdef OPENMPTHREADS
numsubzone_desired=MIN_NUMSUBZONE_PER_THREAD*omp_get_max_threads();
#else
numsubzone_desired=MIN_NUMSUBZONE_PER_THREAD;
#endif
if (numsubzone<numsubzone_desired)
find_subzones_in_zone_given_numsubzone(zone, numsubzone_desired, &numsubzone, &subzone);
xmin=(dim_t *)malloc(numsubzone*sizeof(dim_t));
xmax=(dim_t *)malloc(numsubzone*sizeof(dim_t));
for (cntzone=0; cntzone<numsubzone; cntzone++){
for (dim=0; dim<nd; dim++){
xmin[cntzone][dim]=1e99;
xmax[cntzone][dim]=-1e99;
}
for_ijk(subzone[cntzone],is,js,ks,ie,je,ke){
if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)){
for (dim=0; dim<nd; dim++){
xmin[cntzone][dim]=min(xmin[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim));
xmax[cntzone][dim]=max(xmax[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim));
}
}
}
}
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
wfprintf(stdout,"Fluid/%ld",numsubzone*numproc);
#else
wfprintf(stdout,"Fluid/%ld",numsubzone);
#endif
#if defined(OPENMPTHREADS)
#pragma omp parallel for private(l_file,cntzone,dim,zone,i,j,k,cnt,thisweight) schedule(dynamic)
#endif
for (cntzone=0; cntzone<numsubzone; cntzone++){
for (l_file=0; l_file<numnodes; l_file++){
if (is_data_point_in_domain(x_file[l_file],xmin[cntzone],xmax[cntzone],radiusmax2_file[l_file])){
zone=subzone[cntzone];
if (find_interpolation_zone(np,gl,TYPELEVEL_FLUID,x_file[l_file],radiusmax2_file[l_file],&zone)){
for_jik(zone,is,js,ks,ie,je,ke){
if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)){
find_interpolation_weight(np,gl,_ai(gl,i,j,k),x_file[l_file],dx1_file[l_file],
#ifdef _2DL
dx2_file[l_file],
#endif
#ifdef _3DL
dx3_file[l_file],
#endif
radiusmax2_file[l_file],&thisweight);
#ifdef OPENMPTHREADS
omp_set_lock(&(nodelock[_ai(gl,i,j,k)]));
#endif
if (thisweight>1e-99) {
weight[_ai(gl,i,j,k)]+=thisweight;
for (cnt=0; cnt<numinitvar; cnt++)
initvar[_ai(gl,i,j,k)][cnt]+=thisweight*initvar_file[l_file][cnt];
}
#ifdef OPENMPTHREADS
omp_unset_lock(&(nodelock[_ai(gl,i,j,k)]));
#endif
}
}
}
}
}
fprintf(stdout,".");
fflush(stdout);
// if (mod(cntzone,numsubzone/100+1)==0) wfprintf(stdout,".");
}
#ifdef OPENMPTHREADS
#pragma omp parallel for private(i,j,k,cnt) schedule(static)
#endif
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
if (weight[_ai(gl,i,j,k)]>1e-99 && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)) {
for (cnt=0; cnt<numinitvar; cnt++) initvar[_ai(gl,i,j,k)][cnt]=initvar[_ai(gl,i,j,k)][cnt]/weight[_ai(gl,i,j,k)];
init_node_fluid(np,_ai(gl,i,j,k), gl, defaultinitvartypefluid, initvar[_ai(gl,i,j,k)]);
np[_ai(gl,i,j,k)].INIT_FLUID=TRUE;
}
}
free(initvar);
free(initvar_file);
/* second do the emfield properties */
#ifdef EMFIELD
initvar_emfield=(initvar_emfield_t *)malloc(sizeof(initvar_emfield_t)*(gl->domain_lim.ie+4)
#ifdef _2DL
*(gl->domain_lim.je+4)
#endif
#ifdef _3DL
*(gl->domain_lim.ke+4)
#endif
);
for (cnt=0; cnt<16; cnt++){
if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1){
fatal_error("Problem with fscanf in emfield part of read_data_file_interpolation().");
}
}
data_format_str[16]=EOS;
FORMAT001=FALSE;
if (strcmp("WARPINTFORMAT001",data_format_str)==0) {
FORMAT001=TRUE;
}
if (FORMAT001) {
if (fscanf(datafile," numnodes_emfield=%ld nfe=%ld nd=%ld Lc=%lg effiter_U_emfield=%lg effiter_R_emfield=%lg%*[^\n]",
&numnodes,&numflux_read,&numdim_read,&(gl->Lc),&(gl->effiter_U_emfield),&(gl->effiter_R_emfield))!=6){
fatal_error("Problem reading emfield preambule in interpolating file.");
}
fgetc(datafile);
if (numdim_read!=nd) fatal_error("Number of dimensions read (%ld) does not equal current number of dimensions (%ld).",numdim_read,nd);
if (numflux_read!=nfe) fatal_error("Number of fluxes read (%ld) does not equal current number of emfield fluxes (%ld).",numflux_read,nfe);
gl->Lc=1.0e0;
} else {
fatal_error("Interpolation file format unknown for EMfield variables.");
}
/* read data and store in ram */
initvar_emfield_file=(initvar_emfield_t *)malloc(numnodes*sizeof(initvar_emfield_t));
x_file=(dim_t *)realloc(x_file,numnodes*sizeof(dim_t));
dx1_file=(dim_t *)realloc(dx1_file,numnodes*sizeof(dim_t));
#ifdef _2DL
dx2_file=(dim_t *)realloc(dx2_file,numnodes*sizeof(dim_t));
#endif
#ifdef _3DL
dx3_file=(dim_t *)realloc(dx3_file,numnodes*sizeof(dim_t));
#endif
radiusmax2_file=(double *)realloc(radiusmax2_file,numnodes*sizeof(double));
for (l_file=0; l_file<numnodes; l_file++){
cnterror=0;
if (fread(initvar_emfield_file[l_file], sizeof(initvar_emfield_t), 1, datafile)!=1) cnterror++;
if (fread(x_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++;
if (fread(dx1_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++;
#ifdef _2DL
if (fread(dx2_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++;
#endif
#ifdef _3DL
if (fread(dx3_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++;
#endif
if (cnterror>0) fatal_error("Could not read all data properly.");
radiusmax2_file[l_file]=0.0e0;
for (dim=0; dim<nd; dim++)
radiusmax2_file[l_file]+=sqr(fabs(dx1_file[l_file][dim])
#ifdef _2DL
+fabs(dx2_file[l_file][dim])
#endif
#ifdef _3DL
+fabs(dx3_file[l_file][dim])
#endif
);
radiusmax2_file[l_file]*=1.1;
}
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
weight[_ai(gl,i,j,k)]=0.0e0;
for (cnt=0; cnt<numinitvar_emfield; cnt++) (initvar_emfield[_ai(gl,i,j,k)])[cnt]=0.0;
}
for (cntzone=0; cntzone<numsubzone; cntzone++){
for (dim=0; dim<nd; dim++){
xmin[cntzone][dim]=1e99;
xmax[cntzone][dim]=-1e99;
}
for_ijk(subzone[cntzone],is,js,ks,ie,je,ke){
if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){
for (dim=0; dim<nd; dim++){
xmin[cntzone][dim]=min(xmin[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim));
xmax[cntzone][dim]=max(xmax[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim));
}
}
}
}
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
wfprintf(stdout,"EMfield/%ld",numsubzone*numproc);
#else
wfprintf(stdout,"EMfield/%ld",numsubzone);
#endif
#if defined(OPENMPTHREADS) //&& !defined(DISTMPI)
#pragma omp parallel for private(l_file,cntzone,cnt,thisweight,dim,zone,i,j,k) schedule(dynamic)
#endif
for (cntzone=0; cntzone<numsubzone; cntzone++){
for (l_file=0; l_file<numnodes; l_file++){
if (is_data_point_in_domain(x_file[l_file],xmin[cntzone],xmax[cntzone],radiusmax2_file[l_file])){
zone=subzone[cntzone];
if (find_interpolation_zone(np,gl,TYPELEVEL_EMFIELD,x_file[l_file],radiusmax2_file[l_file],&zone)){
for_jik(zone,is,js,ks,ie,je,ke){
if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){
find_interpolation_weight(np,gl,_ai(gl,i,j,k),x_file[l_file],dx1_file[l_file],
#ifdef _2DL
dx2_file[l_file],
#endif
#ifdef _3DL
dx3_file[l_file],
#endif
radiusmax2_file[l_file],&thisweight);
#ifdef OPENMPTHREADS
omp_set_lock(&(nodelock[_ai(gl,i,j,k)]));
#endif
if (thisweight>1e-99) {
weight[_ai(gl,i,j,k)]+=thisweight;
for (cnt=0; cnt<numinitvar_emfield; cnt++)
initvar_emfield[_ai(gl,i,j,k)][cnt]+=thisweight*initvar_emfield_file[l_file][cnt];
}
#ifdef OPENMPTHREADS
omp_unset_lock(&(nodelock[_ai(gl,i,j,k)]));
#endif
}
}
}
}
}
// if (mod(cntzone,numsubzone/100+1)==0) wfprintf(stdout,".");
fprintf(stdout,".");
fflush(stdout);
}
#ifdef OPENMPTHREADS
#pragma omp parallel for private(i,j,k,cnt) schedule(static)
#endif
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
if (weight[_ai(gl,i,j,k)]>1e-99 && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[_ai(gl,i,j,k)][cnt]=initvar_emfield[_ai(gl,i,j,k)][cnt]/weight[_ai(gl,i,j,k)];
init_node_emfield(np[_ai(gl,i,j,k)], gl, defaultinitvartypeemfield, initvar_emfield[_ai(gl,i,j,k)]);
np[_ai(gl,i,j,k)].INIT_EMFIELD=TRUE;
}
}
free(initvar_emfield);
free(initvar_emfield_file);
#endif //EMFIELD
free(subzone);
free(xmin);
free(xmax);
fclose(datafile);
free(weight);
#ifdef OPENMPTHREADS
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
omp_destroy_lock(&(nodelock[_ai(gl,i,j,k)]));
}
free(nodelock);
#endif
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
if (rank!=0) {
gl->effiter_U=0.0;
gl->effiter_R=0.0;
#ifdef EMFIELD
gl->effiter_U_emfield=0.0;
gl->effiter_R_emfield=0.0;
#endif
}
#endif
wfprintf(stdout,"done;\n");
free(x_file);
free(dx1_file);
#ifdef _2DL
free(dx2_file);
#endif
#ifdef _3DL
free(dx3_file);
#endif
free(radiusmax2_file);
}
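/* Write the current solution to a WARPINTFORMAT001 interpolation data file:
for every valid node, store its initvar properties, its position x, and
the local grid spacing vectors dx1 (and dx2/dx3) needed to rebuild the
interpolation metrics when the file is read back in. */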
void write_data_file_interpolation(char *filename, np_t *np, gl_t *gl){
FILE *datafile;
long i,j,k,cnt;
dim_t dx1,x;
#ifdef _2DL
dim_t dx2;
#endif
#ifdef _3DL
dim_t dx3;
#endif
double tmp_time, tmp_dt;
long numnodes,dim;
int TYPELEVEL,pass,passmax;
bool *NODEVALID;
initvar_t initvar;
double effiter_U,effiter_R;
#ifdef EMFIELD
double effiter_U_emfield,effiter_R_emfield;
initvar_emfield_t initvar_emfield;
#endif
#ifdef DISTMPI
int rank;
MPI_Status MPI_Status1;
#endif
/* nodes may be suspended. Hence, ensure that appropriate nodes are
resumed. */
resume_nodes_in_zone(np,gl,gl->domain);
NODEVALID=(bool *)malloc(sizeof(bool)*(gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
*(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
*(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
);
effiter_U=gl->effiter_U;
effiter_R=gl->effiter_R;
#ifdef EMFIELD
effiter_U_emfield=gl->effiter_U_emfield;
effiter_R_emfield=gl->effiter_R_emfield;
#endif
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Allreduce(&gl->effiter_U, &effiter_U, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&gl->effiter_R, &effiter_R, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#ifdef EMFIELD
MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#endif
#endif
datafile = wfopen(filename, "w");
wfprintf(stdout,"Writing to CFDWARP interpolation data file %s..",filename);
#ifdef EMFIELD
passmax=2;
#else
passmax=1;
#endif
for (pass=1; pass<=passmax; pass++){
if (pass==1){
TYPELEVEL=TYPELEVEL_FLUID;
} else {
#ifdef EMFIELD
TYPELEVEL=TYPELEVEL_EMFIELD;
#endif
}
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
find_NODEVALID_on_domain_all(np, gl, TYPELEVEL, NODEVALID);
numnodes=0;
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
if (NODEVALID[_ai_all(gl,i,j,k)]) {
numnodes++;
}
}
if (pass==1){
#ifdef UNSTEADY
tmp_time=gl->time;
tmp_dt=gl->dt;
#else
tmp_time=0.0;
tmp_dt=dt_steady;
#endif
wfprintf(datafile,"WARPINTFORMAT001 numnodes=%ld nf=%ld nd=%ld ns=%ld windowis=%ld windowie=%ld iter=%ld effiter_U=%E effiter_R=%E CFL=%E time=%E dt=%E\n",numnodes,nf,nd, ns,gl->window.is,gl->window.ie,gl->iter,effiter_U,effiter_R,gl->CFL,tmp_time,tmp_dt);
} else {
#ifdef EMFIELD
wfprintf(datafile,"WARPINTFORMAT001 numnodes_emfield=%ld nfe=%ld nd=%ld Lc=%E effiter_U_emfield=%E effiter_R_emfield=%E\n",numnodes,nfe,nd,gl->Lc,effiter_U_emfield,effiter_R_emfield);
#endif
}
for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
#ifdef DISTMPI
if (pass==1){
if (_node_rank(gl,i,j,k)==rank) {
if (NODEVALID[_ai_all(gl,i,j,k)]) {
find_default_initvar(np, gl, _ai(gl,i,j,k), initvar);
} else {
for (cnt=0; cnt<numinitvar; cnt++) initvar[cnt]=0.0;
}
if (rank!=0) {
MPI_Ssend(initvar,numinitvar,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
}
}
if (rank==0 && _node_rank(gl,i,j,k)!=0){
MPI_Recv(initvar,numinitvar,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
}
} else {
#ifdef EMFIELD
if (_node_rank(gl,i,j,k)==rank) {
if (NODEVALID[_ai_all(gl,i,j,k)]) {
find_default_initvar_emfield(np, gl, _ai(gl,i,j,k),initvar_emfield);
} else {
for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[cnt]=0.0;
}
if (rank!=0) {
MPI_Ssend(initvar_emfield,numinitvar_emfield,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
}
}
if (rank==0 && _node_rank(gl,i,j,k)!=0){
MPI_Recv(initvar_emfield,numinitvar_emfield,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
}
#endif
}
#else
if (pass==1){
if (NODEVALID[_ai_all(gl,i,j,k)]) {
find_default_initvar(np, gl, _ai(gl,i,j,k), initvar);
} else {
for (cnt=0; cnt<numinitvar; cnt++) initvar[cnt]=0.0;
}
} else {
#ifdef EMFIELD
if (NODEVALID[_ai_all(gl,i,j,k)]) {
find_default_initvar_emfield(np, gl, _ai(gl,i,j,k), initvar_emfield);
} else {
for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[cnt]=0.0;
}
#endif
}
#endif
if (NODEVALID[_ai_all(gl,i,j,k)]) {
#ifdef DISTMPI
if (_node_rank(gl,i,j,k)==rank) {
#endif
for (dim=0; dim<nd; dim++) x[dim]=_x(np[_ai(gl,i,j,k)],dim);
for (dim=0; dim<nd; dim++){
if ((i<gl->domain_all.ie && NODEVALID[_ai_all(gl,i+1,j,k)]) && (i>gl->domain_all.is && NODEVALID[_ai_all(gl,i-1,j,k)])) {
dx1[dim]=0.5*(np[_ai(gl,i+1,j,k)].bs->x[dim]-np[_ai(gl,i-1,j,k)].bs->x[dim]);
} else {
if (i<gl->domain_all.ie && NODEVALID[_ai_all(gl,i+1,j,k)]) {
dx1[dim]=(np[_ai(gl,i+1,j,k)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
} else {
if (i>gl->domain_all.is && NODEVALID[_ai_all(gl,i-1,j,k)]) {
dx1[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i-1,j,k)].bs->x[dim]);
} else {
fatal_error("Couldn't find adjacent valid node along i needed for interpolation.");
}
}
}
#ifdef _2DL
if ((j<gl->domain_all.je && NODEVALID[_ai_all(gl,i,j+1,k)]) && (j>gl->domain_all.js && NODEVALID[_ai_all(gl,i,j-1,k)])) {
dx2[dim]=0.5*(np[_ai(gl,i,j+1,k)].bs->x[dim]-np[_ai(gl,i,j-1,k)].bs->x[dim]);
} else {
if (j<gl->domain_all.je && NODEVALID[_ai_all(gl,i,j+1,k)]) {
dx2[dim]=(np[_ai(gl,i,j+1,k)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
} else {
if (j>gl->domain_all.js && NODEVALID[_ai_all(gl,i,j-1,k)]) {
dx2[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i,j-1,k)].bs->x[dim]);
} else {
fatal_error("Couldn't find adjacent valid node along j needed for interpolation.");
}
}
}
#endif
#ifdef _3DL
if ((k<gl->domain_all.ke && NODEVALID[_ai_all(gl,i,j,k+1)]) && (k>gl->domain_all.ks && NODEVALID[_ai_all(gl,i,j,k-1)])) {
dx3[dim]=0.5*(np[_ai(gl,i,j,k+1)].bs->x[dim]-np[_ai(gl,i,j,k-1)].bs->x[dim]);
} else {
if (k<gl->domain_all.ke && NODEVALID[_ai_all(gl,i,j,k+1)]) {
dx3[dim]=(np[_ai(gl,i,j,k+1)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
} else {
if (k>gl->domain_all.ks && NODEVALID[_ai_all(gl,i,j,k-1)]) {
dx3[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i,j,k-1)].bs->x[dim]);
} else {
fatal_error("Couldn't find adjacent valid node along k needed for interpolation.");
}
}
}
#endif
}
#ifdef DISTMPI
}
if (rank!=0 && _node_rank(gl,i,j,k)==rank) MPI_Ssend(x,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
if (rank==0 && _node_rank(gl,i,j,k)!=0) MPI_Recv(x,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
if (rank!=0 && _node_rank(gl,i,j,k)==rank) MPI_Ssend(dx1,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
if (rank==0 && _node_rank(gl,i,j,k)!=0) MPI_Recv(dx1,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#ifdef _2DL
if (rank!=0 && _node_rank(gl,i,j,k)==rank) MPI_Ssend(dx2,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
if (rank==0 && _node_rank(gl,i,j,k)!=0) MPI_Recv(dx2,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#endif
#ifdef _3DL
if (rank!=0 && _node_rank(gl,i,j,k)==rank) MPI_Ssend(dx3,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
if (rank==0 && _node_rank(gl,i,j,k)!=0) MPI_Recv(dx3,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#endif
#endif
if (pass==1) {
wfwrite(initvar, sizeof(initvar_t), 1, datafile);
} else {
#ifdef EMFIELD
wfwrite(initvar_emfield, sizeof(initvar_emfield_t), 1, datafile);
#endif
}
wfwrite(x, sizeof(dim_t), 1, datafile);
wfwrite(dx1, sizeof(dim_t), 1, datafile);
#ifdef _2DL
wfwrite(dx2, sizeof(dim_t), 1, datafile);
#endif
#ifdef _3DL
wfwrite(dx3, sizeof(dim_t), 1, datafile);
#endif
} //end if nodevalid
} // for_ijk
}//pass
#ifdef DISTMPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
wfclose(datafile);
wfprintf(stdout,"done.\n");
free(NODEVALID);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 4;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = A[0][i][j][k]; // keep the boundary planes of both buffers defined
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
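// Jacobi-style update: A[0] and A[1] alternate as source and destination
// through the t%2 / (t+1)%2 indices, so each sweep only reads values from
// the previous time step.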
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing here was causing performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
ll.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
int CHUNK_SIZE = 32;
const int MAX_FIB = 48; // inputs are kept below 48 since fib(48) already overflows the 32-bit integer range
typedef struct Node {
long data;
long fib_data;
struct Node *next;
} Node;
long fib(long i) {
long ret = 0, temp = 1;
long j;
for (j = 0; j < i; ++j) {
long t = temp + ret;
ret = temp;
temp = t;
}
return ret;
}
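/* Process the list with a single-producer/multiple-consumer scheme: thread 0
walks the list and publishes CHUNK_SIZE-node batches into a circular
worklist, while the remaining threads pull batches out (under a critical
section) and compute fib() for each node. head and tail only ever grow, so
tail - head is the current fill level; the scheme relies on busy-waiting
on these shared counters. */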
void update_list(Node *curr) {
size_t wlsize;
Node **worklist;
size_t head = 0, tail = 0;
#pragma omp parallel shared(curr, worklist, head, tail, wlsize)
{
int tid = omp_get_thread_num();
if (tid == 0) {
wlsize = (omp_get_num_threads()*CHUNK_SIZE);
worklist = (Node **) malloc(sizeof(Node *)*wlsize);
}
#pragma omp barrier
if (tid == 0) {
Node *buf[CHUNK_SIZE];
int size;
while (curr != NULL) {
size = 0;
while (curr != NULL && size < CHUNK_SIZE) {
buf[size] = curr;
curr = curr->next;
size++;
}
while (tail - head == wlsize) ;
int i;
for (i = 0; i < size; ++i)
worklist[(i+tail)%wlsize] = buf[i];
tail += size;
}
} else {
Node *buf[CHUNK_SIZE];
int size;
while (curr != NULL || head != tail) {
size = 0;
#pragma omp critical
{
if (head != tail) {
while (head + size < tail && size < CHUNK_SIZE) {
buf[size] = worklist[(head+size)%wlsize];
size++;
}
head += size;
}
}
int i;
for (i = 0; i < size; ++i)
buf[i]->fib_data = fib(buf[i]->data);
}
}
}
free(worklist);
}
Node *make_node(unsigned int val) {
Node *n = (Node *) malloc(sizeof (Node));
n->data = val;
n->fib_data = -1;
n->next = NULL;
return n;
}
void print_list(Node *head) {
while (head != NULL) {
printf("%d %ld --> ", head->data, head->fib_data);
head = head->next;
}
printf("\n");
}
int verify_list(Node *curr) {
while (curr != NULL) {
if (curr->fib_data != fib(curr->data))
return -1;
curr = curr->next;
}
return 0;
}
void destroy_list(Node *curr) {
Node *temp;
while (curr != NULL) {
temp = curr;
curr = curr->next;
free(temp);
}
}
int main(int argc, char *argv[]) {
if (argc < 2) {
printf("Usage: %s NUM_NODES [CHUNK_SIZE]\n", argv[0]);
return 0;
}
unsigned int randval;
FILE *f;
f = fopen("/dev/urandom", "r");
size_t N;
sscanf(argv[1], "%zu", &N);
if (argc >= 3)
sscanf(argv[2], "%d", &CHUNK_SIZE);
Node *head = NULL;
Node **temp = &head;
size_t i;
for (i = 0; i < N; ++i) {
if (fread(&randval, sizeof(randval), 1, f) != 1)
randval = (unsigned int) i; // fall back to a deterministic value on a short read
*temp = make_node(randval%MAX_FIB);
temp = &(*temp)->next;
}
fclose(f);
struct timespec t1, t2;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &t1);
update_list(head);
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &t2);
long elapsed = (t2.tv_sec - t1.tv_sec)*1e9 + (t2.tv_nsec - t1.tv_nsec);
printf("CPU time of all threads = %lf sec\n", (double)elapsed/1e9);
//if (-1 == verify_list(head))
//printf("List not processed completely\n");
destroy_list(head);
return 0;
}
|
DRB044-adi-tile-no.c | /**
* adi.c: This file is part of the PolyBench/C 3.2 test suite.
* Alternating Direction Implicit solver with tiling and nested SIMD.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double; this variant hard-codes a 500x500 grid and 10 time steps. */
#include "polybench/adi.h"
/* Array initialization. */
static void init_array(int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int i;
//int j;
{
int c1;
int c3;
int c2;
int c4;
if (n >= 1) {
#pragma omp parallel for private(c4, c2, c3)
for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {
X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;
A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;
B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;
}
}
}
}
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static void print_array(int n,double X[500 + 0][500 + 0])
{
int i;
int j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf(stderr,"%0.2lf ",X[i][j]);
if ((i * 500 + j) % 20 == 0)
fprintf(stderr,"\n");
}
fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void kernel_adi(int tsteps,int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int t;
//int i1;
//int i2;
//#pragma scop
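// Each time step performs a tridiagonal-style sweep along each grid
// direction: forward elimination on B and X, back-substitution on X, and a
// final normalization, first along the second index and then transposed
// along the first. Loops are 16x16 tiled, with OpenMP across tiles and
// SIMD over the innermost index.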
{
int c0;
int c2;
int c8;
int c9;
int c15;
if (n >= 1 && tsteps >= 1) {
for (c0 = 0; c0 <= tsteps + -1; c0++) {
if (n >= 2) {
#pragma omp parallel for private(c15, c9, c8)
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1];
}
}
}
for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[c15][c9] = X[c15][c9] - X[c15][c9 - 1] * A[c15][c9] / B[c15][c9 - 1];
}
}
}
for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9];
}
}
}
}
}
#pragma omp parallel for private(c15)
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];
}
}
if (n >= 2) {
#pragma omp parallel for private(c15, c9, c8)
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15];
}
}
}
for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15];
}
}
}
for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15];
}
}
}
}
}
#pragma omp parallel for private(c15)
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];
}
}
}
}
}
//#pragma endscop
}
int main(int argc,char **argv)
{
/* Retrieve problem size. */
int n = 500;
int tsteps = 10;
/* Variable declaration/allocation. */
double (*X)[500 + 0][500 + 0];
X = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
double (*A)[500 + 0][500 + 0];
A = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
double (*B)[500 + 0][500 + 0];
B = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
/* Initialize array(s). */
init_array(n, *X, *A, *B);
/* Start timer. */
polybench_timer_start();
;
/* Run kernel. */
kernel_adi(tsteps,n, *X, *A, *B);
/* Stop and print timer. */
polybench_timer_stop();
;
polybench_timer_print();
;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
if (argc > 42 && !strcmp(argv[0],""))
print_array(n, *X);
/* Be clean. */
free(((void *)X));
;
free(((void *)A));
;
free(((void *)B));
;
return 0;
}
|
app.c | /**
* Christina Giannoula
* cgiannoula: christina.giann@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"
// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif
#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB
#define ANSI_COLOR_RED "\x1b[31m"
#define ANSI_COLOR_GREEN "\x1b[32m"
#define ANSI_COLOR_RESET "\x1b[0m"
/*
* Main Structures:
* 1. Matrices
* 2. Input vector
* 3. Output vector
* 4. Help structures for data partitioning
*/
static struct RBDBCOOMatrix* A;
static struct RBDBCSRMatrix* B;
static struct RBDCSRMatrix* C;
static struct COOMatrix* D;
static val_dt* x;
static val_dt* z;
static val_dt* y;
static struct partition_info_t *part_info;
/**
* @brief Specific information for each DPU
*/
struct dpu_info_t {
uint32_t block_rows_per_dpu;
uint32_t cols_per_dpu;
uint32_t prev_block_rows_dpu;
uint32_t block_start;
uint32_t blocks;
uint32_t blocks_pad;
uint32_t merge;
};
struct dpu_info_t *dpu_info;
/**
* @brief find the number of horizontal partitions, i.e. DPUs per vertical partition
* @param n total number of DPUs
* @param horz_partitions output: number of horizontal partitions
* @param vert_partitions number of vertical (column) partitions of the matrix
*/
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
uint32_t dpus_per_vert_partition = n / vert_partitions;
*horz_partitions = dpus_per_vert_partition;
}
/**
* @brief initialize input vector
* @param vec pointer to the input vector
* @param size number of elements in the vector
*/
void init_vector(val_dt* vec, uint32_t size) {
for(unsigned int i = 0; i < size; ++i) {
vec[i] = (val_dt) (i%4+1);
}
}
/**
* @brief compute the reference output on the host CPU (used for correctness checking)
*/
void spmv_host(val_dt *y, struct RBDBCOOMatrix *rbdbcooMtx, val_dt *x) {
uint64_t total_blocks = 0;
for (uint32_t c = 0; c < rbdbcooMtx->vert_partitions; c++) {
uint32_t partition = c;
uint32_t col_offset = c * rbdbcooMtx->tile_width;
for(uint64_t n=0; n < rbdbcooMtx->blocks_per_vert_partition[partition]; n++) {
uint64_t i = rbdbcooMtx->bind[total_blocks + n].rowind;
uint64_t j = rbdbcooMtx->bind[total_blocks + n].colind;
for(uint64_t blr=0; blr < rbdbcooMtx->row_block_size; blr++){
val_dt acc = 0;
for(uint64_t blc=0; blc < rbdbcooMtx->col_block_size; blc++) {
acc += rbdbcooMtx->bval[total_blocks * rbdbcooMtx->row_block_size * rbdbcooMtx->col_block_size + n * rbdbcooMtx->col_block_size * rbdbcooMtx->row_block_size + blr * rbdbcooMtx->col_block_size + blc] * x[col_offset + j * rbdbcooMtx->col_block_size + blc];
}
y[i * rbdbcooMtx->row_block_size + blr] += acc;
}
}
total_blocks += rbdbcooMtx->blocks_per_vert_partition[partition];
}
}
/**
* @brief Main of the Host Application.
*/
int main(int argc, char **argv) {
struct Params p = input_params(argc, argv);
struct dpu_set_t dpu_set, dpu;
uint32_t nr_of_dpus;
uint32_t nr_of_ranks;
// Allocate DPUs and load binary
DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks));
printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks);
printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);
unsigned int i;
// Initialize input data
D = readCOOMatrix(p.fileName);
sortCOOMatrix(D);
uint32_t horz_partitions = 0;
uint32_t vert_partitions = p.vert_partitions;
find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
C = coo2rbdcsr(D, horz_partitions, vert_partitions);
freeCOOMatrix(D);
B = rbdcsr2rbdbcsr(C, p.row_blsize, p.col_blsize);
sortRBDBCSRMatrix(B);
countNNZperBlockRBDBCSRMatrix(B);
freeRBDCSRMatrix(C);
A = rbdbcsr2rbdbcoo(B);
freeRBDBCSRMatrix(B);
// Initialize partition data
part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS);
#if FG_TRANS
struct dpu_set_t rank;
uint32_t each_rank;
DPU_RANK_FOREACH(dpu_set, rank, each_rank){
uint32_t nr_dpus_in_rank;
DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank;
}
int sum = 0;
for(int i=0; i < p.max_nranks+1; i++) {
part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum;
sum += part_info->active_dpus_per_rank[i];
}
#endif
// Initialize help data - Padding needed
uint32_t ncols_pad = A->ncols + A->tile_width + A->col_block_size;
uint32_t tile_width_pad = A->num_block_cols * A->col_block_size;
uint32_t nrows_pad = A->nrows + A->row_block_size;
if (ncols_pad % (8 / byte_dt) != 0)
ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
if (tile_width_pad % (8 / byte_dt) != 0)
tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
#if INT8
if (tile_width_pad % 2 != 0)
tile_width_pad++;
#endif
if (nrows_pad % (8 / byte_dt) != 0)
nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));
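// (the paddings above round each vector length up to a whole number of
// 8-byte words, matching the 8-byte alignment required for MRAM transfers)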
// Allocate input vector
x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
// Allocate output vector
z = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
// Initialize input vector with arbitrary data
init_vector(x, ncols_pad);
// Load-balance blocks across DPUs of the same vertical partition
partition_by_block(A, part_info);
// Initialize help data
dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
// Max limits for parallel transfers
uint64_t max_block_rows_per_dpu = 0;
uint64_t max_blocks_per_dpu = 0;
// Timer for measurements
Timer timer;
i = 0;
uint32_t total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
// Find padding for block rows and non-zero elements needed for CPU-DPU transfers
uint32_t tile_horz_indx = i % A->horz_partitions;
uint32_t tile_vert_indx = i / A->horz_partitions;
uint32_t block_rows_per_dpu = part_info->brow_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx];
uint32_t prev_block_rows_dpu = part_info->brow_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx];
if (block_rows_per_dpu > max_block_rows_per_dpu)
max_block_rows_per_dpu = block_rows_per_dpu;
unsigned int blocks;
blocks = part_info->blocks_dpu[i];
if (blocks > max_blocks_per_dpu)
max_blocks_per_dpu = blocks;
// Keep information per DPU
dpu_info[i].block_rows_per_dpu = block_rows_per_dpu;
dpu_info[i].cols_per_dpu = A->tile_width;
dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu;
dpu_info[i].blocks = blocks;
// Find input arguments per DPU
input_args[i].block_rows = block_rows_per_dpu;
input_args[i].start_block_row = prev_block_rows_dpu;
input_args[i].tcols = tile_width_pad;
input_args[i].row_block_size = A->row_block_size;
input_args[i].col_block_size = A->col_block_size;
//input_args[i].blocks = blocks;
#if BLNC_TSKLT_BLOCK
// Load-balance blocks across tasklets
partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#else
// Load-balance nnzs across tasklets
partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#endif
uint32_t t;
for (t = 0; t < NR_TASKLETS; t++) {
// Find input arguments per tasklet
input_args[i].start_block[t] = part_info->block_split_tasklet[i * (NR_TASKLETS+2) + t];
input_args[i].blocks_per_tasklet[t] = part_info->block_split_tasklet[i * (NR_TASKLETS+2) + (t+1)] - part_info->block_split_tasklet[i * (NR_TASKLETS+2) + t];
}
total_blocks += part_info->blocks_dpu[i];
}
#if FG_TRANS
// Find max number of block rows (subset of elements of the output vector) among DPUs of each rank
DPU_RANK_FOREACH(dpu_set, rank, each_rank){
uint32_t max_block_rows_cur_rank = 0;
uint32_t nr_dpus_in_rank;
DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank];
for (uint32_t k = 0; k < nr_dpus_in_rank; k++) {
if (start_dpu + k >= nr_of_dpus)
break;
if (dpu_info[start_dpu + k].block_rows_per_dpu > max_block_rows_cur_rank)
max_block_rows_cur_rank = dpu_info[start_dpu + k].block_rows_per_dpu;
}
#if INT8
if (max_block_rows_cur_rank % 2 != 0)
max_block_rows_cur_rank++;
#endif
part_info->max_block_rows_per_rank[each_rank] = (uint32_t) max_block_rows_cur_rank;
}
#endif
// Initializations for parallel transfers with padding needed
#if INT8
if (max_block_rows_per_dpu % 2 != 0)
max_block_rows_per_dpu++;
#endif
if (max_blocks_per_dpu % 2 != 0)
max_blocks_per_dpu++;
// Re-allocations for padding needed
A->bind = (struct bind_t *) realloc(A->bind, (max_blocks_per_dpu * nr_of_dpus * sizeof(struct bind_t)));
A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt)));
y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt));
// Count total number of bytes to be transfered in MRAM of DPU
unsigned long int total_bytes;
total_bytes = (max_blocks_per_dpu * sizeof(struct bind_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt));
assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");
// Copy input arguments to DPUs
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
input_args[i].max_block_rows = max_block_rows_per_dpu;
input_args[i].max_blocks = max_blocks_per_dpu;
DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));
// Copy input matrix to DPUs
startTimer(&timer, 0);
// Copy Browind + Bcolind
i = 0;
total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, A->bind + total_blocks));
total_blocks += part_info->blocks_dpu[i];
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt), max_blocks_per_dpu * sizeof(struct bind_t), DPU_XFER_DEFAULT));
// Copy Bvalues
i = 0;
total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size)));
total_blocks += part_info->blocks_dpu[i];
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_blocks_per_dpu * sizeof(struct bind_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
stopTimer(&timer, 0);
// Copy input vector to DPUs
startTimer(&timer, 1);
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
uint32_t tile_vert_indx = i / A->horz_partitions;
DPU_ASSERT(dpu_prepare_xfer(dpu, x + tile_vert_indx * A->tile_width));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
stopTimer(&timer, 1);
// Run kernel on DPUs
startTimer(&timer, 2);
DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
stopTimer(&timer, 2);
#if LOG
// Display DPU Log (default: disabled)
DPU_FOREACH(dpu_set, dpu) {
DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
}
#endif
// Retrieve results for output vector from DPUs
startTimer(&timer, 3);
#if CG_TRANS
// Coarse-grained data transfers in the output vector
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
#endif
#if FG_TRANS
// Fine-grained data transfers in the output vector at rank granularity
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
}
i = 0;
//struct dpu_set_t rank;
DPU_RANK_FOREACH(dpu_set, rank) {
DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_block_rows_per_rank[i] * A->row_block_size * sizeof(val_dt), DPU_XFER_ASYNC));
i++;
}
DPU_ASSERT(dpu_sync(dpu_set));
#endif
stopTimer(&timer, 3);
// Merge partial results to the host CPU
startTimer(&timer, 4);
uint32_t r, c, t, b;
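// Each DPU produced a partial result for its block-row slice; accumulate
// the slices of all (vertical partition, DPU) tiles into the final output
// vector z, parallelizing over the block rows within each tile.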
for (c = 0; c < A->vert_partitions; c++) {
for (r = 0; r < A->horz_partitions; r++) {
#pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_block_rows_per_dpu, c, r) private(t, b)
for (t = 0; t < part_info->brow_split[c * (2 * A->horz_partitions) + 2 * r+1] - part_info->brow_split[c * (2 * A->horz_partitions) + 2 * r]; t++) {
for (b = 0; b < A->row_block_size; b++) {
z[(part_info->brow_split[c * (2 * A->horz_partitions) + 2 * r] + t) * A->row_block_size + b] += y[(c * A->horz_partitions + r) * max_block_rows_per_dpu * A->row_block_size + t * A->row_block_size + b];
}
}
}
}
stopTimer(&timer, 4);
// Print timing results
printf("\n");
printf("Load Matrix ");
printTimer(&timer, 0);
printf("Load Input Vector ");
printTimer(&timer, 1);
printf("Kernel ");
printTimer(&timer, 2);
printf("Retrieve Output Vector ");
printTimer(&timer, 3);
printf("Merge Partial Results ");
printTimer(&timer, 4);
printf("\n\n");
#if CHECK_CORR
// Check output
val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
spmv_host(y_host, A, x);
bool status = true;
i = 0;
for (i = 0; i < A->nrows; i++) {
if(y_host[i] != z[i]) {
status = false;
}
}
if (status) {
printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
} else {
printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
}
free(y_host);
#endif
// Deallocation
freeRBDBCOOMatrix(A);
free(x);
free(y);
free(z);
partition_free(part_info);
DPU_ASSERT(dpu_free(dpu_set));
return 0;
}
|
spmmd_x_csr_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSR *matA, const ALPHA_SPMAT_CSR *matB, ALPHA_Number *matC, const ALPHA_INT ldc)
{
if (matA->cols != matB->rows || ldc < matB->cols)
return ALPHA_SPARSE_STATUS_INVALID_VALUE;
ALPHA_INT m = matA->rows;
for(ALPHA_INT i = 0; i < matA->rows; i++)
for(ALPHA_INT j = 0; j < matB->cols; j++)
{
alpha_setzero(matC[index2(i, j, ldc)]);
}
ALPHA_INT num_thread = alpha_get_thread_num();
ALPHA_INT64 flop[m];
memset(flop, '\0', m * sizeof(ALPHA_INT64));
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT ar = 0; ar < m; ar++)
{
for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++)
{
ALPHA_INT br = matA->col_indx[ai];
flop[ar] += matB->rows_end[br] - matB->rows_start[br];
}
}
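// Prefix-sum the per-row flop counts so that flop[i] holds the cumulative
// work up to row i; this lets the rows be split across threads by equal
// work rather than by equal row count.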
for (ALPHA_INT i = 1; i < m; i++)
{
flop[i] += flop[i - 1];
}
ALPHA_INT partition[num_thread + 1];
balanced_partition_row_by_flop(flop, m, num_thread, partition);
#ifdef _OPENMP
#pragma omp parallel num_threads(num_thread)
#endif
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT local_m_s = partition[tid];
ALPHA_INT local_m_e = partition[tid + 1];
for (ALPHA_INT ar = local_m_s; ar < local_m_e; ar++)
{
for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++)
{
ALPHA_INT br = matA->col_indx[ai];
ALPHA_Number av = matA->values[ai];
for (ALPHA_INT bi = matB->rows_start[br]; bi < matB->rows_end[br]; bi++)
{
ALPHA_INT bc = matB->col_indx[bi];
ALPHA_Number bv = matB->values[bi];
alpha_madde(matC[index2(ar, bc, ldc)], av, bv);
}
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include <omp.h>
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
int testpoint(complex_t c){
int iter;
complex_t z;
double temp;
z = c;
for(iter=0; iter<MXITER; iter++){
temp = (z.r*z.r) - (z.i*z.i) + c.r;
z.i = z.r*z.i*2. + c.i;
z.r = temp;
if((z.r*z.r+z.i*z.i)>4.0){
return iter;
}
}
return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
int n,m;
complex_t c;
double dr = (cmax.r-cmin.r)/(Nre-1);
double di = (cmax.i-cmin.i)/(Nim-1);
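// map pixel (m,n) to the complex number c = cmin + (m*dr, n*di) and record
// its escape iteration count, which is later used to color the pixel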
// Q2c: add a compiler directive to split the outer for loop amongst threads here
#pragma omp parallel private(m,c) // m and c must be private to avoid a data race
{
#pragma omp for
for(n=0;n<Nim;++n){
for(m=0;m<Nre;++m){
c.r = cmin.r + dr*m;
c.i = cmin.i + di*n;
count[m+n*Nre] = testpoint(c);
}
}
}
}
int main(int argc, char **argv){
// to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
// usage: ./mandelbrot 4096 4096 1
if (argc < 4) {
printf("usage: %s Nre Nim Nthreads\n", argv[0]);
return 1;
}
int Nre = atoi(argv[1]);
int Nim = atoi(argv[2]);
int Nthreads = atoi(argv[3]);
// Q2b: set the number of OpenMP threads to be Nthreads here:
omp_set_num_threads(Nthreads);
// storage for the iteration counts
float *count = (float*) malloc(Nre*Nim*sizeof(float));
// Parameters for a bounding box for "c" that generates an interesting image
const float centRe = -.759856, centIm= .125547;
const float diam = 0.151579;
complex_t cmin;
complex_t cmax;
cmin.r = centRe - 0.5*diam;
cmax.r = centRe + 0.5*diam;
cmin.i = centIm - 0.5*diam;
cmax.i = centIm + 0.5*diam;
// Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time
double start = omp_get_wtime();
// compute mandelbrot set
mandelbrot(Nre, Nim, cmin, cmax, count);
// Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time
double end = omp_get_wtime();
// print elapsed time
printf("elapsed = %g\n", end-start);
// output mandelbrot to png format image
FILE *fp = fopen("mandelbrot.png", "w");
write_hot_png(fp, Nre, Nim, count, 0, 80);
return 0;
}
|
initialize.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB BT code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
void initialize()
{
int i, j, k, m, ix, iy, iz;
double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
//---------------------------------------------------------------------
// Later (in compute_rhs) we compute 1/u for every element. A few of
// the corner elements are not used, but it is convenient (and faster)
// to compute the whole thing with a simple loop. Make sure those
// values are nonzero by initializing the whole thing here.
//---------------------------------------------------------------------
for (k = 0; k <= grid_points[2]-1; k++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (i = 0; i <= grid_points[0]-1; i++) {
for (m = 0; m < 5; m++) {
u[k][j][i][m] = 1.0;
}
}
}
}
//---------------------------------------------------------------------
// first store the "interpolated" values everywhere on the grid
//---------------------------------------------------------------------
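// Tri-linear transfinite interpolation: blend the exact solutions on the
// six faces with the inclusion-exclusion formula
// u = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta
// so that u matches the boundary data exactly on each face.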
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)(i) * dnxm1;
for (ix = 0; ix < 2; ix++) {
exact_solution((double)ix, eta, zeta, &Pface[ix][0][0]);
}
for (iy = 0; iy < 2; iy++) {
exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]);
}
for (iz = 0; iz < 2; iz++) {
exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]);
}
for (m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m];
u[k][j][i][m] = Pxi + Peta + Pzeta -
Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
Pxi*Peta*Pzeta;
}
}
}
}
//---------------------------------------------------------------------
// now store the exact values on the boundaries
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// west face
//---------------------------------------------------------------------
i = 0;
xi = 0.0;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)(j) * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// east face
//---------------------------------------------------------------------
i = grid_points[0]-1;
xi = 1.0;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)(j) * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// south face
//---------------------------------------------------------------------
j = 0;
eta = 0.0;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)(k) * dnzm1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// north face
//---------------------------------------------------------------------
j = grid_points[1]-1;
eta = 1.0;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)(k) * dnzm1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// bottom face
//---------------------------------------------------------------------
k = 0;
zeta = 0.0;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// top face
//---------------------------------------------------------------------
k = grid_points[2]-1;
zeta = 1.0;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
#pragma omp target update to(u)
}
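//---------------------------------------------------------------------
// illustration only (not called by the benchmark): the boolean-sum
// blend used above, written out for a single point and a single
// component. Pw/Pe, Ps/Pn, Pb/Pt stand for the west/east, south/north,
// bottom/top face values; the helper and its names are ours.
//---------------------------------------------------------------------
static double tfi_blend(double xi, double eta, double zeta,
                        double Pw, double Pe, double Ps, double Pn,
                        double Pb, double Pt)
{
  double Pxi   = xi   * Pe + (1.0-xi)   * Pw;
  double Peta  = eta  * Pn + (1.0-eta)  * Ps;
  double Pzeta = zeta * Pt + (1.0-zeta) * Pb;
  return Pxi + Peta + Pzeta -
         Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
         Pxi*Peta*Pzeta;
}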
void lhsinit(double lhs[][3][5][5], int size)
{
int i, m, n;
i = size;
//---------------------------------------------------------------------
// zero the whole left hand side for starters
//---------------------------------------------------------------------
for (n = 0; n < 5; n++) {
for (m = 0; m < 5; m++) {
lhs[0][0][n][m] = 0.0;
lhs[0][1][n][m] = 0.0;
lhs[0][2][n][m] = 0.0;
lhs[i][0][n][m] = 0.0;
lhs[i][1][n][m] = 0.0;
lhs[i][2][n][m] = 0.0;
}
}
//---------------------------------------------------------------------
// next, set all diagonal values to 1. This is overkill, but convenient
//---------------------------------------------------------------------
for (m = 0; m < 5; m++) {
lhs[0][1][m][m] = 1.0;
lhs[i][1][m][m] = 1.0;
}
}
|
dsgesv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zcgesv.c, mixed zc -> ds, Fri Sep 28 17:38:17 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
#include <omp.h>
#include <stdbool.h>
/***************************************************************************//**
*
* @ingroup plasma_gesv
*
* Computes the solution to a system of linear equations A * X = B, where A is
* an n-by-n matrix and X and B are n-by-nrhs matrices.
*
* plasma_dsgesv first factorizes the matrix using plasma_sgetrf and uses
* this factorization within an iterative refinement procedure to produce a
* solution with DOUBLE PRECISION normwise backward error quality (see
* below). If the approach fails, the method falls back to a DOUBLE
* PRECISION factorization and solve.
*
* Iterative refinement is not going to be a winning strategy if the
* ratio of SINGLE PRECISION performance to DOUBLE PRECISION performance
* is too small. A reasonable strategy should take the number of
* right-hand sides and the size of the matrix into account. This might
* be done with a call to ILAENV in the future. For now, iterative
* refinement is always attempted.
*
* The iterative refinement process is stopped if iter > itermax or
* for all the RHS we have: Rnorm < sqrt(n)*Xnorm*Anorm*eps, where:
*
* - iter is the number of the current iteration in the iterative refinement
* process
* - Rnorm is the Infinity-norm of the residual
* - Xnorm is the Infinity-norm of the solution
* - Anorm is the Infinity-operator-norm of the matrix A
* - eps is the machine epsilon returned by DLAMCH('Epsilon').
* The value of itermax is fixed at 30.
*
*******************************************************************************
*
* @param[in] n
* The number of linear equations, i.e., the order of the matrix A.
* n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns of the
* matrix B. nrhs >= 0.
*
* @param[in,out] pA
* The n-by-n matrix A.
* On exit, contains the LU factors of A.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in] pB
* The n-by-nrhs matrix of right hand side matrix B.
* This matrix remains unchanged.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
* @param[out] pX
* If return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldx
* The leading dimension of the array X. ldx >= max(1,n).
*
* @param[out] iter
* The number of the iterations in the iterative refinement
* process, needed for the convergence. If failed, it is set
* to be -(1+itermax), where itermax = 30.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dsgesv
* @sa plasma_dsgesv
* @sa plasma_dgesv
*
******************************************************************************/
int plasma_dsgesv(int n, int nrhs,
double *pA, int lda, int *ipiv,
double *pB, int ldb,
double *pX, int ldx, int *iter)
{
// Get PLASMA context
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (n < 0) {
plasma_error("illegal value of n");
return -1;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -2;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -4;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -7;
}
if (ldx < imax(1, n)) {
plasma_error("illegal value of ldx");
return -9;
}
// quick return
*iter = 0;
if (imin(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_getrf(plasma, PlasmaRealFloat, n, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
plasma_desc_t X;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &X);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
return retval;
}
// Create additional tile matrices.
plasma_desc_t R, As, Xs;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
B.m, B.n, 0, 0, B.m, B.n, &R);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
return retval;
}
retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
A.m, A.n, 0, 0, A.m, A.n, &As);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
return retval;
}
retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
X.m, X.n, 0, 0, X.m, X.n, &Xs);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
plasma_desc_destroy(&As);
return retval;
}
// Allocate tiled workspace for Infinity norm calculations.
size_t lwork = imax((size_t)A.nt*A.n+A.n, (size_t)X.mt*X.n+(size_t)R.mt*R.n);
double *work = (double*)malloc((lwork)*sizeof(double));
double *Rnorm = (double*)malloc(((size_t)R.n)*sizeof(double));
double *Xnorm = (double*)malloc(((size_t)X.n)*sizeof(double));
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// Initialize barrier.
plasma_barrier_init(&plasma->barrier);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate matrices to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
// Call tile async function.
plasma_omp_dsgesv(A, ipiv, B, X, As, Xs, R, work, Rnorm, Xnorm, iter,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(X, pX, ldx, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
plasma_desc_destroy(&As);
plasma_desc_destroy(&Xs);
free(work);
free(Rnorm);
free(Xnorm);
// Return status.
int status = sequence.status;
return status;
}
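// A minimal usage sketch (ours, not part of PLASMA): solve a diagonally
// dominant system so the single-precision LU is stable. Assumes
// plasma_init() has already been called (plasma_init/plasma_finalize
// are the standard PLASMA setup calls).
static int dsgesv_example(int n)
{
    int nrhs = 1, iter;
    double *A = (double*)malloc((size_t)n*n*sizeof(double));
    double *B = (double*)malloc((size_t)n*sizeof(double));
    double *X = (double*)malloc((size_t)n*sizeof(double));
    int *ipiv = (int*)malloc((size_t)n*sizeof(int));
    for (int i = 0; i < n*n; i++)
        A[i] = 1.0 / (1.0 + i % n);
    for (int i = 0; i < n; i++) {
        A[(size_t)i*n+i] += (double)n;  // make A diagonally dominant
        B[i] = 1.0;
    }
    int info = plasma_dsgesv(n, nrhs, A, n, ipiv, B, n, X, n, &iter);
    // iter >= 0: converged after 'iter' refinement steps;
    // iter == -31: fell back to the full double-precision solve.
    free(A); free(B); free(X); free(ipiv);
    return info;
}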
// Checks that the convergence criterion holds for all columns of R and X.
static bool conv(double *Rnorm, double *Xnorm, int n, double cte) {
bool value = true;
for (int i = 0; i < n; i++) {
if (Rnorm[i] > Xnorm[i] * cte) {
value = false;
break;
}
}
return value;
}
/***************************************************************************//**
*
* @ingroup plasma_gesv
*
* Solves a general linear system of equations using iterative refinement
* with the LU factor computed using plasma_sgetrf.
* Non-blocking tile version of plasma_dsgesv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in,out] X
* Descriptor of matrix X.
*
* @param[out] As
* Descriptor of auxiliary matrix A in single precision.
*
* @param[out] Xs
* Descriptor of auxiliary matrix X in single precision.
*
* @param[out] R
* Descriptor of auxiliary remainder matrix R.
*
* @param[out] work
* Workspace needed to compute infinity norm of the matrix A.
*
* @param[out] Rnorm
* Workspace needed to store the max value of each residual vector.
*
* @param[out] Xnorm
* Workspace needed to store the max value of each current solution
* vector.
*
* @param[out] iter
* The number of the iterations in the iterative refinement
* process, needed for the convergence. If failed, it is set
* to be -(1+itermax), where itermax = 30.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PLASMA_SUCCESS (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dsgesv
* @sa plasma_omp_dsgesv
* @sa plasma_omp_dgesv
*
******************************************************************************/
void plasma_omp_dsgesv(plasma_desc_t A, int *ipiv,
plasma_desc_t B, plasma_desc_t X,
plasma_desc_t As, plasma_desc_t Xs, plasma_desc_t R,
double *work, double *Rnorm, double *Xnorm, int *iter,
plasma_sequence_t *sequence,
plasma_request_t *request)
{
const int itermax = 30;
const double zmone = -1.0;
const double zone = 1.0;
*iter = 0;
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(X) != PlasmaSuccess) {
plasma_error("invalid X");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(As) != PlasmaSuccess) {
plasma_error("invalid As");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(Xs) != PlasmaSuccess) {
plasma_error("invalid Xs");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(R) != PlasmaSuccess) {
plasma_error("invalid R");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (A.n == 0 || B.n == 0)
return;
// workspaces for damax
double *workX = work;
double *workR = &work[X.mt*X.n];
// Compute some constants.
double cte;
double eps = LAPACKE_dlamch_work('E');
double Anorm;
plasma_pdlange(PlasmaInfNorm, A, work, &Anorm, sequence, request);
// Convert B from double to single precision, store result in Xs.
plasma_pdlag2s(B, Xs, sequence, request);
// Convert A from double to single precision, store result in As.
plasma_pdlag2s(A, As, sequence, request);
// Compute the LU factorization of As.
//#pragma omp taskwait
plasma_psgetrf(As, ipiv, sequence, request);
//#pragma omp taskwait
// Solve the system As * Xs = Bs.
plasma_psgeswp(PlasmaRowwise, Xs, ipiv, 1, sequence, request);
plasma_pstrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, As, Xs, sequence, request);
plasma_pstrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
1.0, As, Xs, sequence, request);
// Convert Xs to double precision.
plasma_pslag2d(Xs, X, sequence, request);
// Compute R = B - A * X.
plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request);
plasma_pdgemm(PlasmaNoTrans, PlasmaNoTrans,
zmone, A, X, zone, R, sequence, request);
// Check whether the nrhs normwise backward error satisfies the
// stopping criterion. If yes, set iter=0 and return.
plasma_pdamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request);
plasma_pdamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request);
#pragma omp taskwait
{
cte = Anorm * eps * sqrt((double)A.n);
if (conv(Rnorm, Xnorm, R.n, cte)) {
*iter = 0;
return;
}
}
// iterative refinement
for (int iiter = 0; iiter < itermax; iiter++) {
// Convert R from double to single precision, store result in Xs.
plasma_pdlag2s(R, Xs, sequence, request);
// Solve the system As * Xs = Rs.
//#pragma omp taskwait
plasma_psgeswp(PlasmaRowwise, Xs, ipiv, 1, sequence, request);
plasma_pstrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, As, Xs, sequence, request);
plasma_pstrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
1.0, As, Xs, sequence, request);
// Convert Xs back to double precision and update the current iterate.
plasma_pslag2d(Xs, R, sequence, request);
plasma_pdgeadd(PlasmaNoTrans, zone, R, zone, X, sequence, request);
// Compute R = B - A * X.
plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request);
plasma_pdgemm(PlasmaNoTrans, PlasmaNoTrans, zmone, A, X, zone, R,
sequence, request);
// Check whether nrhs normwise backward error satisfies the
// stopping criterion. If yes, set iter = iiter > 0 and return.
plasma_pdamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request);
plasma_pdamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request);
#pragma omp taskwait
{
if (conv(Rnorm, Xnorm, R.n, cte)) {
*iter = iiter+1;
return;
}
}
}
// If we are at this place of the code, this is because we have performed
// iter = itermax iterations and never satisfied the stopping criterion,
// set up the iter flag accordingly and follow up with double precision
// routine.
*iter = -itermax - 1;
//#if !defined(PLASMA_DSGESV_WORKAROUND)
// Compute LU factorization of A.
//#pragma omp taskwait
plasma_pdgetrf(A, ipiv, sequence, request);
// Solve the system A * X = B.
plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, X, sequence, request);
//#pragma omp taskwait
plasma_pdgeswp(PlasmaRowwise, X, ipiv, 1, sequence, request);
plasma_pdtrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, A, X, sequence, request);
plasma_pdtrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
1.0, A, X, sequence, request);
//#endif
}
|
clanhe.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlanhe.c, normal z -> c, Fri Sep 28 17:38:07 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup plasma_lanhe
*
* Returns the norm of a Hermitian matrix as
*
* clanhe = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
* (
* ( norm1(A), NORM = PlasmaOneNorm
* (
* ( normI(A), NORM = PlasmaInfNorm
* (
* ( normF(A), NORM = PlasmaFrobeniusNorm
*
* where norm1 denotes the one norm of a matrix (maximum column sum),
* normI denotes the infinity norm of a matrix (maximum row sum) and
* normF denotes the Frobenius norm of a matrix (square root of sum
* of squares). Note that max(abs(A(i,j))) is not a consistent matrix
* norm.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: Max norm
* - PlasmaOneNorm: One norm
* - PlasmaInfNorm: Infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in] pA
* On entry, the Hermitian matrix A.
* If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval float
* The specified norm of the Hermitian matrix A.
*
*******************************************************************************
*
* @sa plasma_omp_clanhe
* @sa plasma_clanhe
*
******************************************************************************/
float plasma_clanhe(plasma_enum_t norm, plasma_enum_t uplo,
int n,
plasma_complex32_t *pA, int lda)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
(norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) {
plasma_error("illegal value of norm");
return -1;
}
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -2;
}
if (n < 0) {
plasma_error("illegal value of n");
return -3;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -5;
}
// quick return
if (n == 0)
return 0.0;
// Tune parameters.
if (plasma->tuning)
plasma_tune_lansy(plasma, PlasmaComplexFloat, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
int retval;
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
// Allocate workspace.
float *work = NULL;
switch (norm) {
case PlasmaMaxNorm:
work = (float*)malloc((size_t)A.mt*A.nt*sizeof(float));
break;
case PlasmaOneNorm:
case PlasmaInfNorm:
work = (float*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(float));
break;
case PlasmaFrobeniusNorm:
work = (float*)malloc((size_t)2*A.mt*A.nt*sizeof(float));
break;
}
if (work == NULL) {
plasma_error("malloc() failed");
return PlasmaErrorOutOfMemory;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
float value;
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
// Call tile async function.
plasma_omp_clanhe(norm, uplo, A, work, &value, &sequence, &request);
}
// implicit synchronization
free(work);
// Free matrix in tile layout.
plasma_desc_destroy(&A);
// Return the norm.
return value;
}
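// Reference check (ours, not part of PLASMA): a naive O(n^2) max norm
// over the stored triangle, handy for validating the tiled result on
// small n. Assumes plasma_complex32_t is C99 float _Complex, as defined
// in plasma_types.h, so <complex.h> provides cabsf(). By Hermitian
// symmetry |A(i,j)| = |A(j,i)|, so the stored triangle suffices.
#include <complex.h>
static float lanhe_max_ref(plasma_enum_t uplo, int n,
                           const plasma_complex32_t *pA, int lda)
{
    float value = 0.0f;
    for (int j = 0; j < n; j++) {
        int ibeg = (uplo == PlasmaUpper) ? 0 : j;
        int iend = (uplo == PlasmaUpper) ? j : n-1;
        for (int i = ibeg; i <= iend; i++) {
            float a = cabsf(pA[(size_t)j*lda + i]);
            if (a > value)
                value = a;
        }
    }
    return value;
}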
/***************************************************************************//**
*
* @ingroup plasma_lanhe
*
* Calculates the max, one, infinity or Frobenius norm of a Hermitian matrix.
* Non-blocking equivalent of plasma_clanhe(). May return before the
* computation is finished. Operates on matrices stored by tiles. All matrices
* are passed through descriptors. All dimensions are taken from the
* descriptors. Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: Max norm
* - PlasmaOneNorm: One norm
* - PlasmaInfNorm: Infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
* The descriptor of matrix A.
*
* @param[out] work
* Workspace of size:
* - PlasmaMaxNorm: A.mt*A.nt
* - PlasmaOneNorm: A.mt*A.n + A.n
* - PlasmaInfNorm: A.mt*A.n + A.n
* - PlasmaFrobeniusNorm: 2*A.mt*A.nt
*
* @param[out] value
* The calculated value of the norm requested.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_clanhe
* @sa plasma_omp_clanhe
*
******************************************************************************/
void plasma_omp_clanhe(plasma_enum_t norm, plasma_enum_t uplo, plasma_desc_t A,
float *work, float *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
(norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
plasma_error("illegal value of norm");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid descriptor A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (A.m == 0) {
*value = 0.0;
return;
}
// Call the parallel function.
plasma_pclanhe(norm, uplo, A, work, value, sequence, request);
}
|
openmp-ex25.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
int main(void)
{
int num_threads;
int fib = 1;
int fib_prev = 0;
#pragma omp parallel
{
/* calculating the fibonacci number of the number of threads is trivial,
* but sometimes there is work that must be serial (typically I/O, or
* because there is a lot of state that the threads have built up that
* can't/shouldn't be recalculated) that is inconvenient to have outside
* of a parallel block */
#pragma omp single
{
int i;
num_threads = omp_get_num_threads();
for (i = 2; i <= num_threads; i++) {
int fib_next = fib + fib_prev;
fib_prev = fib;
fib = fib_next;
}
printf("fib(num_threads = %d) = %d\n",num_threads,fib);
}
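/* the single construct ends with an implicit barrier (no "nowait"
 * clause), so every thread waits here and the printf completes before
 * the parallel region is torn down */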
}
return 0;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
struct Result
{
long operationsRun;
double millisecondsSpent;
double microsecondsPerOperation;
};
/* Here lies the actual code */
void executeSummation(int range)
{
/* heap allocation: a range x range int VLA would overflow the stack for
 * large ranges (10000 x 10000 ints is ~400 MB), and calloc also
 * zero-initializes the cells that the loop accumulates into */
int *output = calloc((size_t)range * range, sizeof(int));
#pragma acc kernels
{
#pragma omp parallel for
for (int a = 0; a < range; a++)
{
for (int b = 0; b < range; b++)
{
int result = 0;
for (int n = 0; n < (a + b); n++)
{
result += n;
}
output[(size_t)a * range + b] += result;
}
}
}
free(output);
}
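/* For reference (ours): the inner loop computes the triangular number
 * T(a+b-1) = (a+b-1)*(a+b)/2, so each cell has a closed form. The loop
 * is kept above because it is the timed workload. */
long triangular(int n) /* sum of 0, 1, ..., n-1 */
{
return ((long)n - 1) * n / 2;
}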
struct Result runComputation(int range)
{
struct Result result;
clock_t begin = clock();
executeSummation(range);
clock_t end = clock();
result.operationsRun = (long)range * range;
result.millisecondsSpent = 1000.0 * (double)(end - begin) / CLOCKS_PER_SEC;
result.microsecondsPerOperation = 1000.0 * result.millisecondsSpent / (double)result.operationsRun;
return result;
}
int main()
{
struct Result result = runComputation(10000);
printf("\nRan %ld ops\n", result.operationsRun);
printf("Total execution time: %f ms\n", result.millisecondsSpent);
printf("Total time per op: %f microsecs\n", result.microsecondsPerOperation);
return 0;
} |
hugepages.c | #include "sicm_low.h"
#include "sicmimpl.h"
#include <stdio.h>
#include <time.h>
#include <sys/mman.h>
#ifndef MAP_HUGE_SHIFT
#include <linux/mman.h>
#endif
// 20 MiB
#define SZ 20971520
int main() {
struct sicm_device_list devices = sicm_init();
unsigned int start, end;
unsigned int i;
printf("%d\n", MAP_HUGE_SHIFT);
char* data = sicm_device_alloc(&devices.devices[0], SZ);
start = time(NULL);
//#pragma omp parallel for
for(i = 0; i < 1000000000; i++) {
data[sicm_hash(i) % SZ] = '0';
}
end = time(NULL);
sicm_device_free(&devices.devices[0], data, SZ);
printf("time for normal pages: %d s\n", end - start);
// allocate from the huge-page device (index 1), matching the
// sicm_device_free call below
data = sicm_device_alloc(&devices.devices[1], SZ);
start = time(NULL);
//#pragma omp parallel for
for(i = 0; i < 1000000000; i++) {
data[sicm_hash(i) % SZ] = '0';
}
end = time(NULL);
sicm_device_free(&devices.devices[1], data, SZ);
printf("time for huge pages: %d s\n", end - start);
}
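/* For comparison outside SICM (a sketch, ours): explicit huge-page
 * backing with plain mmap. Requires huge pages reserved via
 * /proc/sys/vm/nr_hugepages; returns NULL when none are available. */
static char *alloc_huge(size_t sz) {
void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
return (p == MAP_FAILED) ? NULL : (char *)p;
}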
|
proj4_transform.c | /* proj4_transform.c Public domain 10/2012 Wesley Ebisuzaki */
/* proj4_ll2xy convert lat/lon to X,Y */
/* proj4_xy2ll convert X,Y to lat/lon */
#include <stdio.h>
#include <stdlib.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"
#ifdef USE_PROJ4
#include "proj_api.h"
#include "proj4_wgrib2.h"
int proj4_ll2xy(struct proj4_struct *projection, int n, double *lon, double *lat, double *x, double *y) {
int i;
double rlon, rlat;
if (projection->proj_is_nop == 1) { // lat-lon
#pragma omp parallel for schedule(static) private(i,rlon,rlat)
for (i = 0; i < n; i++) {
rlon = lon[i];
rlat = lat[i];
x[i] = (rlon - projection->x_0);
y[i] = (rlat - projection->y_0);
}
return 0;
}
#pragma omp parallel for schedule(static) private(i,rlon,rlat)
for (i = 0; i < n; i++) {
rlon = lon[i] * DEG_TO_RAD;
rlat = lat[i] * DEG_TO_RAD;
if ( pj_transform(projection->pj_latlon, projection->pj_grid, 1, 1, &rlon, &rlat, NULL) != 0 ) {
x[i] = y[i] = UNDEFINED;
}
else {
x[i] = (rlon - projection->x_0);
y[i] = (rlat - projection->y_0);
}
}
return 0;
}
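/* equivalent helper (ours, not part of wgrib2) for the repeated
 * +/- 360 adjustments used in proj4_xy2ll below: normalize a
 * longitude into [0,360) */
static double norm_lon(double lon) {
while (lon < 0.0) lon += 360.0;
while (lon >= 360.0) lon -= 360.0;
return lon;
}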
int proj4_xy2ll(struct proj4_struct *projection, int n, double *x, double *y, double *lon, double *lat) {
int i;
double rlon, rlat;
if (projection->proj_is_nop == 1) { // lat-lon relative to x_0, y_0
#pragma omp parallel for schedule(static) private(i,rlon,rlat)
for (i = 0; i < n; i++) {
rlon = x[i] + projection->x_0;
rlat = y[i] + projection->y_0;
if (rlon >= 360.0) rlon -= 360.0;
if (rlon < 0.0) rlon += 360.0;
if (rlon < 0.0) rlon += 360.0;
lon[i] = rlon;
lat[i] = rlat;
}
return 0;
}
#pragma omp parallel for schedule(static) private(i,rlon,rlat)
for (i = 0; i < n; i++) {
rlon = x[i] + projection->x_0;
rlat = y[i] + projection->y_0;
if ( pj_transform(projection->pj_grid, projection->pj_latlon, 1, 1, &rlon, &rlat, NULL) != 0 ) {
lon[i] = lat[i] = UNDEFINED;
}
else {
lon[i] = rlon * RAD_TO_DEG;
lat[i] = rlat * RAD_TO_DEG;
if (lon[i] < 0.0) lon[i] += 360.0;
if (lon[i] > 360) lon[i] -= 360.0;
}
}
return 0;
}
#endif
|
dropout-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu, Da Zheng, Hang Zhang
*/
#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#include "../tensor/elemwise_binary_broadcast_op.h"
#if defined(USE_MKL) && defined(_OPENMP) && !defined(__CUDACC__)
#define MXNET_USE_MKL_DROPOUT 1
#endif
#if MXNET_USE_MKL_DROPOUT
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // MXNET_USE_MKL_DROPOUT
#define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7
namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};
} // namespace dropout
namespace mxnet {
namespace op {
const int MAX_DIM = 5;
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
float p;
int mode;
mxnet::TShape axes;
dmlc::optional<bool> cudnn_off;
DMLC_DECLARE_PARAMETER(DropoutParam) {
DMLC_DECLARE_FIELD(p).set_default(0.5)
.set_range(0, 1)
.describe("Fraction of the input that gets dropped out during training time.");
DMLC_DECLARE_FIELD(mode)
.add_enum("training", dropout::kTraining)
.add_enum("always", dropout::kAlways)
.set_default(dropout::kTraining)
.describe("Whether to only turn on dropout during training or to also turn on for inference.");
DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0))
.describe("Axes for variational dropout kernel.");
DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false))
.describe("Whether to turn off cudnn in dropout operator. "
"This option is ignored if axes is specified.");
}
}; // struct DropoutParam
template<typename xpu, typename DType>
class DropoutOp {
#if MXNET_USE_MKL_DROPOUT
static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
int n, double p, int* r) {
typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
const int seed = 17 + abs(genImpl.rand() % 4096);
CHECK_GE(seed, 0);
const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
{
const int ithr = omp_get_thread_num();
const int avg_amount = (n + nthr - 1) / nthr;
const int my_offset = ithr * avg_amount;
const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
if (my_amount > 0) {
VSLStreamStatePtr stream;
vslNewStream(&stream, VSL_BRNG_MCG31, seed);
vslSkipAheadStream(stream, my_offset);
viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
vslDeleteStream(&stream);
}
}
}
static inline bool MKLAvailable() {
// BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer
// will be too small, so we can't use MKL in those cases
return sizeof(DType) >= sizeof(int);
}
// MKL forward pass
inline void MKLForward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data) {
Stream<xpu> *s = ctx.get_stream<xpu>();
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
DType *outptr = out.dptr_;
DType *dataptr = data.dptr_;
auto maskptr = reinterpret_cast<int *>(mask.dptr_);
int count = mask.shape_[0] * mask.shape_[1];
if (sizeof(DType) > sizeof(int)) {
// allocating new buffer to avoiding memory overlapping between `mask.dptr_` and `maskptr`
Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s);
maskptr = temp.dptr_;
}
BernoulliGenerate(*pgen, count, this->pkeep_, maskptr);
const float pk_1 = 1.0f / this->pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = 0; i < count; ++i) {
const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1;
outptr[i] = dataptr[i] * maskVal;
mask.dptr_[i] = maskVal;
}
}
// MKL backward pass
inline void MKLBackward(const OpContext &ctx,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &out_grad) {
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
DType *ingradptr = gdata.dptr_;
const DType *outgradptr = grad.dptr_;
const DType *maskptr = mask.dptr_;
const int count = mask.shape_[0] * mask.shape_[1];
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = 0; i < count; ++i) {
ingradptr[i] = outgradptr[i] * maskptr[i];
}
}
#endif // #if MXNET_USE_MKL_DROPOUT
public:
/*!
* \brief Dropout kernel, compute dropout tensor
*/
struct DropoutKernel {
/*!
* \brief Dropout kernel function
* \param id Thread number (0-based representing count)
* \param gen Random number generator
* \param N Total number of items in the output
* \param step Step between items, related to parallelism
* \param dropout_out Output dropout values
* \param mask_out Output mask (is multiplied to create dropout output, may be 0)
* \param input_data Input data to perform the dropout on
* \param pkeep Keep probability (an element is kept when the generated random number is less than this value)
*/
MSHADOW_XINLINE static void Map(int id,
RandGenerator<xpu, DType> gen,
const int N,
const int step,
DType *dropout_out,
DType *mask_out,
const DType *input_data,
const real_t pkeep) {
RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
const real_t rand_num = static_cast<real_t>(genImpl.uniform());
mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
dropout_out[i] = input_data[i] * mask_out[i];
});
}
};
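/*
 * Note on the scaling above: with inverted dropout the mask is either
 * 0 (dropped) or 1/pkeep (kept), so E[mask] = pkeep * (1/pkeep) = 1
 * and the expected activation equals the input; no rescaling is needed
 * at inference time.
 */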
struct BernoulliKernel {
/*! \brief Bernoulli kernel for generating mask */
MSHADOW_XINLINE static void Map(int id,
RandGenerator<xpu, DType> gen,
const int N,
const int step,
DType *mask_out,
const real_t pkeep) {
RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
const real_t rand_num = static_cast<real_t>(genImpl.uniform());
mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
});
}
};
explicit DropoutOp(const DropoutParam ¶m, Context ctx) {
this->pkeep_ = 1.0f - param.p;
this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
this->axes_ = param.axes;
this->dropout_passthrough_ = true;
#if MXNET_USE_CUDNN_DROPOUT
this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value();
this->ctx_ = ctx;
if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
dtype_ = mshadow::DataType<DType>::kCudnnFlag;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_));
CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_));
}
#endif // MXNET_USE_CUDNN_DROPOUT
}
~DropoutOp() {
#if MXNET_USE_CUDNN_DROPOUT
if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_));
CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_));
}
#endif // MXNET_USE_CUDNN_DROPOUT
}
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
inline bool CuDNNAvailable() {
return this->pkeep_ > 0 && !this->cudnn_off_;
}
inline void CuDNNForward(const OpContext &ctx,
const TBlob &in,
const TBlob &mask,
const TBlob &out) {
Stream<xpu> *s = ctx.get_stream<xpu>();
// set dropout state.
ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_);
// describe input/output tensor
int dim[4], stride[4];
dim[0] = 1;
dim[1] = 1;
dim[2] = 1;
dim[3] = out.Size();
stride[0] = out.Size();
stride[1] = out.Size();
stride[2] = out.Size();
stride[3] = 1;
CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_,
dtype_,
4,
dim,
stride));
CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_,
dtype_,
4,
dim,
stride));
// perform dropout with cudnn
CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
// cudnn uses bits to record the positions that are dropped, so reserve bytes is always
// 1/8 of input size.
CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) <<
"The size of the mask space is smaller than the required cudnn reserved space.";
CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
dropout_desc_,
x_desc_,
in.dptr<DType>(),
y_desc_,
out.dptr<DType>(),
mask.dptr<DType>(),
dropout_reserve_byte_));
}
inline void CuDNNBackward(const OpContext &ctx,
const TBlob &out_grad,
const TBlob &mask,
const TBlob &in_grad) {
Stream<xpu> *s = ctx.get_stream<xpu>();
// describe input/output tensor
int dim[4], stride[4];
dim[0] = 1;
dim[1] = 1;
dim[2] = 1;
dim[3] = in_grad.Size();
stride[0] = in_grad.Size();
stride[1] = in_grad.Size();
stride[2] = in_grad.Size();
stride[3] = 1;
CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_,
dtype_,
4,
dim,
stride));
CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_,
dtype_,
4,
dim,
stride));
// perform dropout with cudnn
CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
dropout_desc_,
dy_desc_,
out_grad.dptr<DType>(),
dx_desc_,
in_grad.dptr<DType>(),
mask.dptr<DType>(),
dropout_reserve_byte_));
}
#endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data) {
this->dropout_passthrough_ = true;
if (req[dropout::kOut] != kNullOp) {
CHECK_EQ(in_data.size(), 1U);
if (ctx.is_train) {
CHECK_EQ(out_data.size(), 2U);
}
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob &in = in_data[dropout::kData];
const TBlob &out = out_data[dropout::kOut];
const TBlob &mask = out_data[dropout::kMask];
if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
this->dropout_passthrough_ = false;
if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
if (MKLAvailable()) {
MKLForward(ctx, in_data, out_data);
return;
}
#endif // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
if (CuDNNAvailable()) {
CuDNNForward(ctx, in, mask, out);
return;
}
#endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
CHECK(req[dropout::kOut] != kAddTo);
LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
out.dptr<DType>(),
mask.dptr<DType>(),
in.dptr<DType>(),
this->pkeep_);
return;
} else {
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
// initialize the mask
LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
mask.dptr<DType>(),
this->pkeep_);
// broadcast mul
mxnet::TShape new_lshape, new_rshape, new_oshape;
int ndim = BinaryBroadcastShapeCompact(in.shape_,
mask.shape_, out.shape_,
&new_lshape, &new_rshape, &new_oshape);
if (!ndim) {
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
s, out.Size(), out.dptr<DType>(), in.dptr<DType>(),
mask.dptr<DType>());
});
} else {
BROADCAST_NDIM_SWITCH(ndim, NDim, {
mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
mshadow_op::mul>, xpu>::
template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
lstride, rstride, oshape,
in.dptr<DType>(),
mask.dptr<DType>(), out.dptr<DType>());
});
}
}
} else {
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
});
}
}
}
void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
if (!this->dropout_passthrough_) {
this->dropout_passthrough_ = true;
const TBlob &gdata = in_grad[dropout::kData];
const TBlob &grad = out_grad[dropout::kOut];
const TBlob &mask = out_data[dropout::kMask];
if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
if (MKLAvailable()) {
MKLBackward(ctx, in_grad, out_data, out_grad);
return;
}
#endif // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
if (CuDNNAvailable()) {
CuDNNBackward(ctx, grad, mask, gdata);
return;
}
#endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
// standard case for dropout
CHECK_EQ(grad.Size(), mask.Size());
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
});
return;
} else {
// broardcast mul
mxnet::TShape new_lshape, new_rshape, new_oshape;
int ndim = BinaryBroadcastShapeCompact(grad.shape_,
mask.shape_, gdata.shape_,
&new_lshape, &new_rshape, &new_oshape);
if (!ndim) {
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
});
} else {
BROADCAST_NDIM_SWITCH(ndim, NDim, {
mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
mshadow_op::mul>, xpu>::
template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape,
grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
});
}
}
} else {
const TBlob& gdata = in_grad[dropout::kData];
const TBlob& grad = out_grad[dropout::kOut];
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
});
}
}
private:
/*! \brief Keep probability (an element is kept when the generated random number is less than this value) */
real_t pkeep_;
/*! \brief Dropout mode */
dropout::DropoutOpMode mode_;
/*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
mxnet::TShape axes_;
/*! \brief Flag to record whether forward is executed in pass-through mode */
bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
bool cudnn_off_;
Context ctx_;
cudnnDataType_t dtype_;
cudnnDropoutDescriptor_t dropout_desc_;
uint64_t seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
size_t dropout_reserve_byte_;
cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif // MXNET_USE_CUDNN_DROPOUT
}; // class DropoutOp
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
op.Forward(ctx, inputs, req, outputs);
});
}
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1);
CHECK_EQ(req.size(), 1);
std::vector<TBlob> out_grads(2);
std::vector<TBlob> out_data(2);
out_grads[dropout::kOut] = inputs[0];
out_data[dropout::kMask] = inputs[1];
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
op.Backward(ctx, out_grads, out_data, req, outputs);
});
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
|
spmm.h | /*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <limits>
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <x86intrin.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <mkl_spblas.h>
#include <mkl.h>
#include <omp.h>
#include <vector>
namespace dgl {
namespace aten {
namespace cpu {
typedef struct {
int32_t M, K, N;
int32_t *indptr, *indices;
float *values;
} csrm;
/*!
* \brief CPU kernel of SpMM on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes.
*/
#if 1
// // -----------------------------------------------------------------------------------
// // ------------------------------ Optimized Sparse MM3 -------------------------------
// // -----------------------------------------------------------------------------------
template <typename IdType, typename DType, typename Op>
// void sparse_mm3(
void SpMMSumCsr(
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat, NDArray efeat,
NDArray out) {
const IdType* IndPtr = csr.indptr.Ptr<IdType>();
const IdType* Indices = csr.indices.Ptr<IdType>();
// const IdType* edges = csr.data.Ptr<IdType>();
DType* C = out.Ptr<DType>();
DType* B = ufeat.Ptr<DType>();
if (sizeof(DType) == 8)
std::cout << "sizeof(DType) = " << sizeof(DType)
<< ", sizeof(IdType) = " << sizeof(IdType) << std::endl;
#define M_BLOCK_SIZE 1024
#define K_BLOCK_SIZE 4096
#define K_BLOCK_MASK (K_BLOCK_SIZE - 1)
#define N_BLOCK_SIZE 640
#define SORT 0
const int M = csr.num_rows;
const int N = bcast.out_len; //csr.N;
const int K = csr.num_cols;
int nthreads = omp_get_max_threads();
int32_t num_M_blocks = (M + M_BLOCK_SIZE - 1) / M_BLOCK_SIZE;
int32_t num_K_blocks = (K + K_BLOCK_SIZE - 1) / K_BLOCK_SIZE;
csrm block_csr_array[num_M_blocks * num_K_blocks];
//int *cur_col_id = (int *)_mm_malloc(2 * M_BLOCK_SIZE * sizeof(int), 64);
uint64_t startTick, endTick;
startTick = __rdtsc();
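// Stage 1: partition the CSR matrix into M_BLOCK_SIZE x K_BLOCK_SIZE
// panels. Each thread rewrites its row panels into per-panel mini-CSRs
// (block_csr_array) so stage 2 can stream rows of B with better cache
// reuse; my_cur_col_id tracks, per row, how far into the row's column
// list the previous K panel consumed.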
#pragma omp parallel
{
int *my_cur_col_id = (int *)_mm_malloc(2 * M_BLOCK_SIZE * sizeof(int), 64);
uint64_t tst = __rdtsc();
int tid = omp_get_thread_num();
#pragma omp for
for(int m = 0; m < num_M_blocks; m++)
{
int32_t M_start = m * M_BLOCK_SIZE;
int32_t M_end = (m + 1) * M_BLOCK_SIZE;
if(M_end > M) M_end = M;
int nnz = IndPtr[M_end] - IndPtr[M_start];
int32_t cur_indices_id = 0;
int32_t *indices = (int32_t *)_mm_malloc(nnz * sizeof(int32_t), 64);
for(int i = M_start; i < M_end; i++)
{
my_cur_col_id[(i - M_start) * 2] = IndPtr[i];
my_cur_col_id[(i - M_start) * 2 + 1] = IndPtr[i + 1];
}
for(int k = 0; k < num_K_blocks; k++)
{
int32_t K_start = k * K_BLOCK_SIZE;
int32_t K_end = (k + 1) * K_BLOCK_SIZE;
if(K_end > K) K_end = K;
csrm cur_csr;
cur_csr.M = M_end - M_start;
cur_csr.K = K_end - K_start;
cur_csr.N = N;
// Create csr_ij
int32_t *indptr = (int32_t *)_mm_malloc((cur_csr.M + 1) * sizeof(int32_t), 64);
cur_csr.indptr = indptr;
cur_csr.indices = indices + cur_indices_id;
cur_csr.values = NULL;
int cur_nnz = 0;
for(int i = M_start; i < M_end; i++)
{
const int row_start = my_cur_col_id[(i - M_start) * 2];
const int row_end = my_cur_col_id[(i - M_start) * 2 + 1];
indptr[i - M_start] = cur_nnz;
int eid;
for(eid = row_start; eid < row_end; eid++)
{
const int dst = Indices[eid];
if(dst >= K_end)
{
break;
}
if(cur_indices_id + cur_nnz >= nnz)
{
printf("Error! cur_indices_id + cur_nnz = %d, nnz = %d\n", cur_indices_id + cur_nnz, nnz);
exit(0);
}
indices[cur_indices_id + cur_nnz] = dst;
cur_nnz++;
}
my_cur_col_id[(i - M_start) * 2] = eid;
}
indptr[cur_csr.M] = cur_nnz;
cur_indices_id += cur_nnz;
block_csr_array[m * num_K_blocks + k] = cur_csr;
}
if(nnz != cur_indices_id)
{
printf("cur_indices_id = %d, expected = %d\n", cur_indices_id, nnz);
exit(0);
}
}
_mm_free(my_cur_col_id);
uint64_t tend = __rdtsc();
// printf("%d] %lu\n", tid, tend - tst);
}
endTick = __rdtsc();
// printf("stage 1: %lu\n", endTick - startTick);
// int nnz_ = static_cast<int32_t*>(csr.indptr)[M];
int nnz_ = static_cast<const IdType*>(IndPtr)[M];
#if VER
fprintf(stderr, "nthreads: %d, M: %d, K: %d, N: %d, nnz: %d\n",
nthreads, M, K, N, nnz_);
#endif
// #if FILEIO
// static int cnt = 0;
// if (N > 600) {
// cnt ++;
// FILE *fp = fopen("csr.txt", "a");
// fwrite(&M, sizeof(int32_t), 1, fp);
// fwrite(&K, sizeof(int32_t), 1, fp);
// fwrite(&N, sizeof(int32_t), 1, fp);
// fwrite(static_cast<int32_t*>(csr.indptr->data), sizeof(int32_t), M+1, fp);
// fwrite(static_cast<int32_t*>(csr.indices->data), sizeof(int32_t), nnz_, fp);
// fclose(fp);
// }
// if (cnt == 300)
// exit(0);
// #endif
#define PFD 160
startTick = __rdtsc();
int32_t N_block_start = 0;
int32_t N_block_end = N;
int rem = (N_block_end - N_block_start) & 0xf;
__mmask16 mask = (1 << rem) - 1;
__m512 zero512 = _mm512_setzero_ps();
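// Stage 2: for each K panel, sweep the M panels in parallel and
// accumulate C[src,:] += B[dst,:] with AVX-512, processing four source
// rows of B per iteration where possible; 'mask' covers the N % 16
// tail lanes and 'zero512' supplies the masked-out elements.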
#pragma omp parallel
{
int tid = omp_get_thread_num();
uint64_t tst = __rdtsc();
for(int32_t k = 0; k < num_K_blocks; k++)
{
#pragma omp for schedule(dynamic)
for(int32_t m = 0; m < num_M_blocks; m++)
{
//printf("m = %d\n", m);
csrm cur_csr = block_csr_array[m * num_K_blocks + k];
int32_t cur_M = cur_csr.M;
int32_t cur_K = cur_csr.K;
int32_t cur_N = cur_csr.N;
int32_t M_start = m * M_BLOCK_SIZE;
for(int i = 0; i < cur_M; i++)
{
const int row_start = cur_csr.indptr[i];
const int row_end = cur_csr.indptr[i + 1];
int32_t src = i + M_start;
int32_t eid;
for(eid = row_start; eid < (row_end - 4); eid+=4)
{
int j;
DType *Bptr0 = &B[cur_csr.indices[eid] * N + N_block_start];
DType *Bptr1 = &B[cur_csr.indices[eid + 1] * N + N_block_start];
DType *Bptr2 = &B[cur_csr.indices[eid + 2] * N + N_block_start];
DType *Bptr3 = &B[cur_csr.indices[eid + 3] * N + N_block_start];
DType *B_next_ptr0 = &B[cur_csr.indices[eid + 4] * N + N_block_start];
DType *B_next_ptr1 = &B[cur_csr.indices[eid + 5] * N + N_block_start];
DType *B_next_ptr2 = &B[cur_csr.indices[eid + 6] * N + N_block_start];
DType *B_next_ptr3 = &B[cur_csr.indices[eid + 7] * N + N_block_start];
DType *Cptr = &C[src * N + N_block_start];
#pragma unroll(16)
for(j = N_block_start; j < N_block_end - PFD; j += 16)
{
_mm_prefetch((const char *)(Bptr0 + PFD), _MM_HINT_T0);
_mm_prefetch((const char *)(Bptr1 + PFD), _MM_HINT_T0);
_mm_prefetch((const char *)(Bptr2 + PFD), _MM_HINT_T0);
_mm_prefetch((const char *)(Bptr3 + PFD), _MM_HINT_T0);
//B_next_ptr0 += 16;
//B_next_ptr1 += 16;
//B_next_ptr2 += 16;
//B_next_ptr3 += 16;
__m512 c512 = _mm512_loadu_ps(Cptr);
Cptr += 16;
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr0), c512);
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr1), c512);
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr2), c512);
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr3), c512);
Bptr0 += 16;
Bptr1 += 16;
Bptr2 += 16;
Bptr3 += 16;
_mm512_storeu_ps(&C[src * N + j], c512);
}
#pragma unroll(16)
for(; j < N_block_end - 15; j += 16)
{
_mm_prefetch((const char *)(B_next_ptr0), _MM_HINT_T0);
_mm_prefetch((const char *)(B_next_ptr1), _MM_HINT_T0);
_mm_prefetch((const char *)(B_next_ptr2), _MM_HINT_T0);
_mm_prefetch((const char *)(B_next_ptr3), _MM_HINT_T0);
B_next_ptr0 += 16;
B_next_ptr1 += 16;
B_next_ptr2 += 16;
B_next_ptr3 += 16;
__m512 c512 = _mm512_loadu_ps(Cptr);
Cptr += 16;
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr0), c512);
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr1), c512);
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr2), c512);
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr3), c512);
Bptr0 += 16;
Bptr1 += 16;
Bptr2 += 16;
Bptr3 += 16;
_mm512_storeu_ps(&C[src * N + j], c512);
}
__m512 c512 = _mm512_mask_loadu_ps(zero512, mask, &C[src * N + j]);
c512 = _mm512_add_ps(_mm512_mask_loadu_ps(zero512, mask, Bptr0), c512);
c512 = _mm512_add_ps(_mm512_mask_loadu_ps(zero512, mask, Bptr1), c512);
c512 = _mm512_add_ps(_mm512_mask_loadu_ps(zero512, mask, Bptr2), c512);
c512 = _mm512_add_ps(_mm512_mask_loadu_ps(zero512, mask, Bptr3), c512);
_mm512_mask_storeu_ps(&C[src * N + j], mask, c512);
}
for(; eid < (row_end - 1); eid++)
{
int32_t dst = cur_csr.indices[eid];
int32_t dst_next = cur_csr.indices[eid + 1];
int j;
DType *Bptr = &B[dst * N + N_block_start];
DType *B_next_ptr = &B[dst_next * N + N_block_start];
DType *Cptr = &C[src * N + N_block_start];
#pragma unroll(16)
for(j = N_block_start; j < N_block_end - 15; j += 16)
{
_mm_prefetch((const char *)(B_next_ptr), _MM_HINT_T0);
B_next_ptr += 16;
__m512 c512 = _mm512_loadu_ps(Cptr);
Cptr += 16;
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr), c512);
Bptr += 16;
_mm512_storeu_ps(&C[src * N + j], c512);
// if (N < 602)
// std::cout << " Check eid " << N << std::endl;
}
// if (N < 602)
// { std::cout << " Start loop --- outer " << N << std::endl;
// std::cout << " Start Block " << N_block_start << std::endl;
// std::cout << " End Block " << N_block_end << std::endl;
// }
__m512 c512 = _mm512_mask_loadu_ps(zero512, mask, &C[src * N + j]);
c512 = _mm512_add_ps(_mm512_mask_loadu_ps(zero512, mask, Bptr), c512);
_mm512_mask_storeu_ps(&C[src * N + j], mask, c512);
}
for(; eid < row_end; eid++)
{
int32_t src_next = src + 1;
int32_t dst = cur_csr.indices[eid];
int32_t dst_next = cur_csr.indices[eid + 1];
DType *Bptr = &B[dst * N + N_block_start];
DType *B_next_ptr = &B[dst_next * N + N_block_start];
DType *Cptr = &C[src * N + N_block_start];
DType *C_next_ptr = &C[src_next * N + N_block_start];
int j;
#pragma unroll(16)
for(j = N_block_start; j < N_block_end - 15; j += 16)
{
_mm_prefetch((const char *)(C_next_ptr), _MM_HINT_T0);
C_next_ptr += 16;
_mm_prefetch((const char *)(B_next_ptr), _MM_HINT_T0);
B_next_ptr += 16;
__m512 c512 = _mm512_loadu_ps(Cptr);
Cptr += 16;
c512 = _mm512_add_ps(_mm512_loadu_ps(Bptr), c512);
Bptr += 16;
_mm512_storeu_ps(&C[src * N + j], c512);
}
__m512 c512 = _mm512_mask_loadu_ps(zero512, mask, &C[src * N + j]);
c512 = _mm512_add_ps(_mm512_mask_loadu_ps(zero512, mask, Bptr), c512);
_mm512_mask_storeu_ps(&C[src * N + j], mask, c512);
}
}
}
}
uint64_t tend = __rdtsc();
// printf("%d] %lu\n", tid, tend - tst);
}
endTick = __rdtsc();
// printf("stage2 ticks = %ld\n", endTick - startTick);
for(int m = 0; m < num_M_blocks; m++)
{
for(int k = 0; k < num_K_blocks; k++)
{
_mm_free(block_csr_array[m * num_K_blocks + k].indptr);
}
_mm_free(block_csr_array[m * num_K_blocks].indices);
}
#undef K_BLOCK_SIZE
#undef K_BLOCK_MASK
#undef N_BLOCK_SIZE
#undef SORT
}
#else
// -------------------------------------------------------------------------------------------------
// --------------------------------------- Default SpMMSumCsr --------------------------------------
// -------------------------------------------------------------------------------------------------
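/*!
 * \brief CPU kernel of SpMM-Sum on Csr format (default path).
 * \param bcast Broadcast information.
 * \param csr The Csr matrix.
 * \param ufeat The feature on source nodes.
 * \param efeat The feature on edges.
 * \param out The result feature on destination nodes.
 * \note It uses a node-parallel strategy: each thread owns a disjoint set of
 *       destination rows, so no atomics are needed in the reduction.
 */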
template <typename IdType, typename DType, typename Op>
void SpMMSumCsr(
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat, NDArray efeat,
NDArray out) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = csr.indptr.Ptr<IdType>();
const IdType* indices = csr.indices.Ptr<IdType>();
const IdType* edges = csr.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType *out_off = O + rid * dim;
std::fill(out_off, out_off + dim, 0);
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx ? edges[j] : j;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType *lhs_off =
Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
const DType *rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
out_off[k] += Op::Call(lhs_off, rhs_off);
}
}
}
}
#endif
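/*
 * Illustrative sketch (not part of the original source): a minimal scalar
 * model of the CSR sum-reduction above, with Op fixed to Mul (u * e) and with
 * eid == j (the IsNullArray(csr.data) case). All names are hypothetical, and
 * the block is disabled so it does not affect compilation of this header.
 */
#if 0
#include <algorithm>
#include <vector>
static void toy_spmm_sum_csr(
    const std::vector<int>& indptr, const std::vector<int>& indices,
    const std::vector<float>& ufeat, const std::vector<float>& efeat,
    std::vector<float>* out, int dim) {
  const int num_rows = static_cast<int>(indptr.size()) - 1;
  for (int rid = 0; rid < num_rows; ++rid) {     // one destination row each
    float* out_off = out->data() + rid * dim;
    std::fill(out_off, out_off + dim, 0.f);
    for (int j = indptr[rid]; j < indptr[rid + 1]; ++j) {
      const int cid = indices[j];                // source node of this edge
      for (int k = 0; k < dim; ++k)              // Op::Call(lhs, rhs) == lhs * rhs
        out_off[k] += ufeat[cid * dim + k] * efeat[j * dim + k];
    }
  }
}
#endif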
/*!
* \brief CPU kernel of SpMM on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
 * \note It uses an edge-parallel strategy: different threads are responsible
 *       for the computation of different nonzeros. Because multiple edges may
 *       share a destination node, atomic operators are used in the reduction phase.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCoo(
const BcastOff& bcast,
const COOMatrix& coo,
NDArray ufeat, NDArray efeat,
NDArray out) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = coo.row.Ptr<IdType>();
const IdType* col = coo.col.Ptr<IdType>();
const IdType* edges = coo.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
const int64_t nnz = coo.row->shape[0];
// fill zero elements
memset(O, 0, out.GetSize());
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx? edges[i] : i;
DType* out_off = O + cid * dim;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
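      // Skip the relatively expensive atomic update when this edge
      // contributes nothing to the destination row.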
if (val != 0) {
#pragma omp atomic
out_off[k] += val;
}
}
}
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
 * \param argu Arg-Min/Max on source nodes, which records the source node indices
 *        that correspond to the minimum/maximum values of the reduction result on
 *        destination nodes. It's useful in computing gradients of Min/Max reducer.
 * \param arge Arg-Min/Max on edges, which records the edge indices that
 *        correspond to the minimum/maximum values of the reduction result on
 *        destination nodes. It's useful in computing gradients of Min/Max reducer.
* \note It uses node parallel strategy, different threads are responsible
* for the computation of different nodes.
* \note The result will contain infinity for zero-degree nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr(
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat, NDArray efeat,
NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
const IdType* indices = static_cast<IdType*>(csr.indices->data);
const IdType* edges = has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr;
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
    IdType* argx_off = Op::use_lhs ? argX + rid * dim : nullptr;
    IdType* argw_off = Op::use_rhs ? argW + rid * dim : nullptr;
std::fill(out_off, out_off + dim, Cmp::zero);
if (Op::use_lhs)
std::fill(argx_off, argx_off + dim, 0);
if (Op::use_rhs)
std::fill(argw_off, argw_off + dim, 0);
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx? edges[j] : j;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs)
argx_off[k] = cid;
if (Op::use_rhs)
argw_off[k] = eid;
}
}
}
}
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
 * \param argu Arg-Min/Max on source nodes, which records the source node indices
 *        that correspond to the minimum/maximum values of the reduction result on
 *        destination nodes. It's useful in computing gradients of Min/Max reducer.
 * \param arge Arg-Min/Max on edges, which records the edge indices that
 *        correspond to the minimum/maximum values of the reduction result on
 *        destination nodes. It's useful in computing gradients of Min/Max reducer.
 * \note It uses an edge-parallel strategy: different threads are responsible
 *       for the computation of different nonzeros. To avoid data hazards on
 *       shared destination rows, the compare-and-update step runs in a critical section.
* \note The result will contain infinity for zero-degree nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo(
const BcastOff& bcast,
const COOMatrix& coo,
NDArray ufeat, NDArray efeat,
NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = static_cast<IdType*>(coo.row->data);
const IdType* col = static_cast<IdType*>(coo.col->data);
const IdType* edges = has_idx? static_cast<IdType*>(coo.data->data) : nullptr;
const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr;
const int64_t nnz = coo.row->shape[0];
// fill zero elements
std::fill(O, O + out.NumElements(), Cmp::zero);
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx? edges[i] : i;
DType* out_off = O + cid * dim;
IdType* argx_off = Op::use_lhs? argX + cid * dim : nullptr;
IdType* argw_off = Op::use_rhs? argW + cid * dim : nullptr;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
#pragma omp critical
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs)
argx_off[k] = rid;
if (Op::use_rhs)
argw_off[k] = eid;
}
}
}
}
namespace op {
//////////////////////////////// binary operators on CPU ////////////////////////////////
template <typename DType>
struct Add {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off + *rhs_off;
}
};
template <typename DType> constexpr bool Add<DType>::use_lhs;
template <typename DType> constexpr bool Add<DType>::use_rhs;
template <typename DType>
struct Sub {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off - *rhs_off;
}
};
template <typename DType> constexpr bool Sub<DType>::use_lhs;
template <typename DType> constexpr bool Sub<DType>::use_rhs;
template <typename DType>
struct Mul {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off * *rhs_off;
}
};
template <typename DType> constexpr bool Mul<DType>::use_lhs;
template <typename DType> constexpr bool Mul<DType>::use_rhs;
template <typename DType>
struct Div {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off / *rhs_off;
}
};
template <typename DType> constexpr bool Div<DType>::use_lhs;
template <typename DType> constexpr bool Div<DType>::use_rhs;
template <typename DType>
struct CopyLhs {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = false;
inline static DType Call(const DType* lhs_off, const DType* ) {
return *lhs_off;
}
};
template <typename DType> constexpr bool CopyLhs<DType>::use_lhs;
template <typename DType> constexpr bool CopyLhs<DType>::use_rhs;
template <typename DType>
struct CopyRhs {
static constexpr bool use_lhs = false;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* , const DType* rhs_off) {
return *rhs_off;
}
};
template <typename DType> constexpr bool CopyRhs<DType>::use_lhs;
template <typename DType> constexpr bool CopyRhs<DType>::use_rhs;
//////////////////////////////// Reduce operators on CPU ////////////////////////////////
template <typename DType>
struct Max {
static constexpr DType zero = -std::numeric_limits<DType>::infinity();
// return true if accum should be replaced
  inline static bool Call(DType accum, DType val) {
return accum < val;
}
};
template <typename DType> constexpr DType Max<DType>::zero;
template <typename DType>
struct Min {
static constexpr DType zero = std::numeric_limits<DType>::infinity();
// return true if accum should be replaced
  inline static bool Call(DType accum, DType val) {
return accum > val;
}
};
template <typename DType> constexpr DType Min<DType>::zero;
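// Illustrative sketch (not part of the original source): the Cmp structs act
// as replace-predicates for the SpMMCmp* kernels above; a scalar max-reduction
// over one row would look like:
//   DType best = Max<DType>::zero;
//   for (DType val : row)
//     if (Max<DType>::Call(best, val)) best = val;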
#define SWITCH_OP(op, Op, ...) \
do { \
if ((op) == "add") { \
typedef dgl::aten::cpu::op::Add<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "sub") { \
typedef dgl::aten::cpu::op::Sub<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "mul") { \
typedef dgl::aten::cpu::op::Mul<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "div") { \
typedef dgl::aten::cpu::op::Div<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_lhs") { \
typedef dgl::aten::cpu::op::CopyLhs<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_rhs") { \
typedef dgl::aten::cpu::op::CopyRhs<DType> Op; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "Unsupported SpMM binary operator: " << op; \
} \
} while (0)
} // namespace op
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_SPMM_H_
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
/* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */
double tmp[4]; /* note indexes 0 and 5 remain unchanged */
tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4];
affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3];
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
/* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty */
double tmp[4]; /* note indexes 0 and 5 remain unchanged */
tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2];
coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3];
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 50 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
inverse[0]=determinant*coeff[4];
inverse[1]=determinant*(-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
inverse[3]=determinant*(-coeff[3]);
inverse[4]=determinant*coeff[0];
inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 53 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
 * Order must either be an integer, or 1.5 to produce
 * the 2-dimensional bilinear polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
 * Anything beyond quintic has not been implemented, pending
 * a more automated way of determining terms.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
/* Return the number of terms for a 2d polynomial */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
return 0; /* invalid polynomial order */
return((size_t) floor((order+1)*(order+2)/2));
}
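/* For reference, (order+1)*(order+2)/2 evaluates to: order 1 -> 3 terms,
   1.5 -> 4 (floor of 4.375), 2 -> 6, 3 -> 10, 4 -> 15, 5 -> 21, matching
   the table above. */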
static double poly_basis_fn(ssize_t n, double x, double y)
{
/* Return the result for this polynomial term */
switch(n) {
case 0: return( 1.0 ); /* constant */
case 1: return( x );
case 2: return( y ); /* affine order = 1 terms = 3 */
case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */
case 4: return( x*x );
case 5: return( y*y ); /* quadratic order = 2 terms = 6 */
case 6: return( x*x*x );
case 7: return( x*x*y );
case 8: return( x*y*y );
case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */
case 10: return( x*x*x*x );
case 11: return( x*x*x*y );
case 12: return( x*x*y*y );
case 13: return( x*y*y*y );
case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */
case 15: return( x*x*x*x*x );
case 16: return( x*x*x*x*y );
case 17: return( x*x*x*y*y );
case 18: return( x*x*y*y*y );
case 19: return( x*y*y*y*y );
case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */
}
return( 0 ); /* should never happen */
}
static const char *poly_basis_str(ssize_t n)
{
/* return the result for this polynomial term */
switch(n) {
case 0: return(""); /* constant */
case 1: return("*ii");
case 2: return("*jj"); /* affine order = 1 terms = 3 */
case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */
case 4: return("*ii*ii");
case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */
case 6: return("*ii*ii*ii");
case 7: return("*ii*ii*jj");
case 8: return("*ii*jj*jj");
case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */
case 10: return("*ii*ii*ii*ii");
case 11: return("*ii*ii*ii*jj");
case 12: return("*ii*ii*jj*jj");
case 13: return("*ii*jj*jj*jj");
case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */
case 15: return("*ii*ii*ii*ii*ii");
case 16: return("*ii*ii*ii*ii*jj");
case 17: return("*ii*ii*ii*jj*jj");
case 18: return("*ii*ii*jj*jj*jj");
case 19: return("*ii*jj*jj*jj*jj");
case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */
}
return( "UNKNOWN" ); /* should never happen */
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
/* polynomial term for x derivative */
switch(n) {
case 0: return( 0.0 ); /* constant */
case 1: return( 1.0 );
case 2: return( 0.0 ); /* affine order = 1 terms = 3 */
case 3: return( y ); /* bilinear order = 1.5 terms = 4 */
case 4: return( x );
case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */
case 6: return( x*x );
case 7: return( x*y );
case 8: return( y*y );
case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */
case 10: return( x*x*x );
case 11: return( x*x*y );
case 12: return( x*y*y );
case 13: return( y*y*y );
case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */
case 15: return( x*x*x*x );
case 16: return( x*x*x*y );
case 17: return( x*x*y*y );
case 18: return( x*y*y*y );
case 19: return( y*y*y*y );
case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */
}
return( 0.0 ); /* should never happen */
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
/* polynomial term for y derivative */
switch(n) {
case 0: return( 0.0 ); /* constant */
case 1: return( 0.0 );
case 2: return( 1.0 ); /* affine order = 1 terms = 3 */
case 3: return( x ); /* bilinear order = 1.5 terms = 4 */
case 4: return( 0.0 );
case 5: return( y ); /* quadratic order = 2 terms = 6 */
default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */
}
/* NOTE: the only reason that last is not true for 'quadratic'
is due to the re-arrangement of terms to allow for 'bilinear'
*/
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
double
distort[6];
Image
*deskew_image;
/*
Affine transform image.
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(affine_matrix != (AffineMatrix *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
distort[0]=affine_matrix->sx;
distort[1]=affine_matrix->rx;
distort[2]=affine_matrix->ry;
distort[3]=affine_matrix->sy;
distort[4]=affine_matrix->tx;
distort[5]=affine_matrix->ty;
deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
MagickTrue,exception);
return(deskew_image);
}
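/*
  Illustrative usage sketch (not part of the original source): rotate an image
  30 degrees about the origin with AffineTransformImage(). 'image' and
  'exception' are assumed to exist and error handling is elided; the fragment
  is disabled so it does not affect the build.
*/
#if 0
  AffineMatrix affine;
  double angle = DegreesToRadians(30.0);
  affine.sx = cos(angle);    affine.rx = sin(angle);
  affine.ry = (-sin(angle)); affine.sy = cos(angle);
  affine.tx = 0.0;           affine.ty = 0.0;
  Image *rotated = AffineTransformImage(image, &affine, exception);
#endif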
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
%               Format: u,v,x,y  where u,v is the 'source' of the
%               color to be plotted, for DistortImage()
%            N: Interpolation of control points with N values (usually r,g,b)
%               Format: x,y,r,g,b  mapping x,y to color values r,g,b
%            In future, a variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortMethod *method,const size_t number_arguments,const double *arguments,
size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
    cp_size,      /* number of floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
  cp_size = number_values+2; /* each CP definition involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
      /* number of coefficients depends on the given polynomial 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
    case QuadrilateralDistortion:
      number_coeff=19; /* BilinearForward + BilinearReverse */
      break;
#endif
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
      /* 2 or more points (usually 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
        terms[1] = arguments[cp_y]
                 + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
          /* Image Distortion - rotate the u,v coordinates too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
        Returns coefficients (in their inverse form) ordered as...
           sx ry tx  rx sy ty
        AffineProjection Distortion Notes...
          + Will only work with a 2-dimensional Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
      AffineArgsToCoefficients(inverse);        /* map into coefficients */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
          + Will only work with a 2-dimensional Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
      /* Trap if sx or sy == 0 -- image is scaled out of existence! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
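    /* Editorial check: with a == 0 and sx == sy == 1 the coefficients reduce
       to [1,0,x-nx, 0,1,y-ny], a pure reverse-mapped translation moving the
       image content by (nx-x, ny-y), as expected. */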
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
          + Not separable: r() or c6 and c7 are used by both equations
          + All 8 coefficients must be determined simultaneously
          + Will only work with a 2-dimensional Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
        terms[6]=-terms[0]*arguments[i+cp_u]; /* -u*x -> c6 */
        terms[7]=-terms[1]*arguments[i+cp_u]; /* -u*y -> c7 */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
        terms[6]=-terms[3]*arguments[i+cp_v]; /* -v*x -> c6 */
        terms[7]=-terms[4]*arguments[i+cp_v]; /* -v*y -> c7 */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
        Calculate the 9th coefficient: the ground-sky determination.
        What is the sign of the 'ground' in the r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
        Arguments: Perspective Coefficients (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
      /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinant = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
        Calculate the 9th coefficient: the ground-sky determination.
        What is the sign of the 'ground' in the r() denominator affine function?
        Just use any valid image coordinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
           The above least-squares solved for the coefficients, but in the
           forward direction, due to the changed indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
           functions. It required a full page of algebra to work out the
           reversed mapping formula, but it resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
             x = ( i - c1*y) / ( c0 + c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
           NB: technically coefficient c5 is no longer needed, but it is
           kept for completeness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
         First two coefficients are used to hold global polynomial information
         c0 = Order of the polynomial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
      /* fake a number_values x nterms vectors matrix from the coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
            bottom_radius  Set bottom edge to this radius (radial scaling)
         By default, if the radii arguments are not provided, the image radius
         is calculated so the horizontal center-line fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
         Coefficients 0 to 5 are the sanitized version of the first 6 input args
         Coefficient 6 is the angle-to-coord ratio, and vice versa
         Coefficient 7 is the radius-to-coord ratio, and vice versa
         WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
      /* Rmin - usually 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
      /* if the radius is 0 or negative, it's a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
      /* conversion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
         Projection between a cylinder and a flat plane from a point on the
         center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
         Coefficients: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
         Coeff 0 is the FOV angle of view of image width in radians
         Coeff 1 is the calculated radius of the cylinder.
         Coeff 2,3 is the center of distortion of the input image
         Coefficients 4,5 are the center of distortion of the dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
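    /* Editorial check: with a 90 degree FOV, tan(coeff[0]/2) == 1, so for
       Plane2CylinderDistortion the radius becomes image->columns/2, i.e. the
       plane sits half the image width away from the projection point. */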
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
         Where Rd is the radius from the image center, normalized so that
         Rd=1 at half the smaller image dimension (see rscale below)
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
         Returns 10 coefficient values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
      /* X,Y Center of Distortion (image coordinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
        /* default: the center of the image (image coordinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
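    /* Illustrative note on the normalization above (a sketch, not extra
       functionality): with args "0.0 0.0 0.0" the D coefficient defaults to
       1-A-B-C = 1.0, so Rs = Rd and the distortion is the identity. For a
       600x400 image rscale = 2/400 = 0.005, so a pixel 200 pixels from the
       center (the middle of the shorter edge) is at normalized radius 1.0
       before the polynomial is applied.
    */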
case ShepardsDistortion:
{
      /* Shepards Distortion: the input arguments are the coefficients!
         Just check that the number of arguments is valid!
         Args: u1,v1, x1,y1, ...
          OR : u1,v1, r1,g1,c1, ...
      */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
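    /* Illustrative note: the artifact value is halved above because the
       main loop keeps distances squared. For example,
       "-define shepards:power=3" stores coeff[0]=1.5, so the weight becomes
       (r^2)^1.5 = r^3 before inversion, while the default coeff[0]=1.0
       gives the usual inverse-squared weighting.
    */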
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resizes an image using the equivalent but slower image
% distortion operator. The filter is applied using EWA cylindrical
% resampling. But like resize, the final image size is limited to whole
% pixels, with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow
% to resize as images without one.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
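#if 0
/* Illustrative usage sketch only (not compiled): resize to a fixed 320x240
   result, assuming 'image' and 'exception' were set up by the caller as in
   any other MagickCore call.
*/
{
  Image
    *resized;

  resized=DistortResizeImage(image,320,240,exception);
  if (resized != (Image *) NULL)
    resized=DestroyImage(resized);  /* ... use the result, then free it */
}
#endif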
MagickExport Image *DistortResizeImage(const Image *image,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"
Image
*resize_image,
*tmp_image;
RectangleInfo
crop_area;
double
distort_args[12];
VirtualPixelMethod
vp_save;
/*
Distort resize image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
/* Do not short-circuit this resize if final image size is unchanged */
(void) memset(distort_args,0,sizeof(distort_args));
distort_args[4]=(double) image->columns;
distort_args[6]=(double) columns;
distort_args[9]=(double) image->rows;
distort_args[11]=(double) rows;
vp_save=GetImageVirtualPixelMethod(image);
tmp_image=CloneImage(image,0,0,MagickTrue,exception);
if (tmp_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
exception);
if (image->alpha_trait == UndefinedPixelTrait)
{
/*
        Image has no transparency channel, so we are free to use it
*/
(void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception),
tmp_image=DestroyImage(tmp_image);
if (resize_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
exception);
}
else
{
/*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the Virtual-Pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
distort alpha channel separately
*/
Image
*resize_alpha;
(void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
(void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception),
tmp_image=DestroyImage(tmp_image);
if (resize_alpha == (Image *) NULL)
return((Image *) NULL);
/* distort the actual image containing alpha + VP alpha */
tmp_image=CloneImage(image,0,0,MagickTrue,exception);
if (tmp_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(tmp_image,
TransparentVirtualPixelMethod,exception);
resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception),
tmp_image=DestroyImage(tmp_image);
if (resize_image == (Image *) NULL)
{
resize_alpha=DestroyImage(resize_alpha);
return((Image *) NULL);
}
      /* replace the resized image's alpha with the separately distorted alpha */
(void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
(void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
(void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
MagickTrue,0,0,exception);
resize_alpha=DestroyImage(resize_alpha);
}
(void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
/*
Clean up the results of the Distortion
*/
crop_area.width=columns;
crop_area.height=rows;
crop_area.x=0;
crop_area.y=0;
tmp_image=resize_image;
resize_image=CropImage(tmp_image,&crop_area,exception);
tmp_image=DestroyImage(tmp_image);
if (resize_image != (Image *) NULL)
{
resize_image->alpha_trait=image->alpha_trait;
resize_image->compose=image->compose;
resize_image->page.width=0;
resize_image->page.height=0;
}
return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set, print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
%    distortion when more than the minimum number of control point pairs
% are provided.
%
%    Perspective, and Bilinear, fall back to an Affine distortion when
%    fewer than 4 control point pairs are provided. Affine distortions
%    let you use any number of control point pairs; zero pairs is a
%    no-op (viewport only) distortion, one pair is a translation, and
%    two pairs of control points do a scale-rotate-translate, without
%    any shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
%      Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
%      Directly set the output image canvas area and offset to use for the
%      resulting image, rather than use the original image's canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
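#if 0
/* Illustrative usage sketch only (not compiled): the ResizeDistortion
   compound method takes exactly two arguments, the new width and height,
   as checked at the top of DistortImage() below.
*/
{
  double
    resize_args[2] = { 320.0, 240.0 };

  Image
    *result;

  result=DistortImage(image,ResizeDistortion,2,resize_args,MagickFalse,
    exception);
}
#endif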
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
PixelInfo
invalid; /* the color to assign when distort result is invalid */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
    Usually the four corners of the source image are enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
    /* Set the output image geometry to the calculated 'bestfit'.
       Yes this tends to 'over do' the final image size, ON PURPOSE!
       Do not do this for DePolar which needs to be exact for virtual tiling.
    */
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
  /* A user-provided 'viewport' expert option may override some parts of
     the current output image geometry.
     This also overrides its default 'bestfit' setting.
  */
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
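  /* For example, "-set option:distort:viewport 640x480+0+0" (parsed by
     ParseAbsoluteGeometry() above) forces a 640x480 output canvas with a
     zero offset, regardless of any 'bestfit' calculation. */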
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
register ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method)
{
case AffineDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0],coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3],coeff[4],coeff[5]);
(void) FormatLocaleFile(stderr," %s' \\\n",lookup);
break;
}
case PerspectiveDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr,"Perspective Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort PerspectiveProjection \\\n '");
for (i=0; i < 4; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for ( ; i < 7; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[7]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%.1024s",image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n",
GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]);
(void) FormatLocaleFile(stderr,
" xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr,
" yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n",
coeff[8] < 0.0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
{
(void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4],coeff[5],coeff[6],coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5-
coeff[7]);
(void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if (coeff[9] != 0)
{
(void) FormatLocaleFile(stderr,
" rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4],
-coeff[0]);
(void) FormatLocaleFile(stderr,
" yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]);
}
else
(void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4],coeff[0]);
(void) FormatLocaleFile(stderr,
" xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0],
coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr," (rt < 0 ) ? red : %s'\n",
lookup);
else
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BilinearReverseDistortion:
{
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr,
" xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1],
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr,
" yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5],
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr,
"Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0],
(unsigned long) nterms);
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n yy =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr,"\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr,
" c%.20g = %+lf\n",(double) i,coeff[i]);
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1],
coeff[4]);
(void) FormatLocaleFile(stderr,
" yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]);
(void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1],coeff[7] );
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
          (void) FormatLocaleFile(stderr,
            "DePolar Distort, Internal Coefficients\n");
          for (i=0; i < 8; i++)
            (void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
              coeff[i]);
          (void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivalent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n",
coeff[6],+coeff[4]);
(void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n",
coeff[7],+coeff[1]);
(void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n",
coeff[2]);
(void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n",
coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
          (void) FormatLocaleFile(stderr,
            "Cylinder to Plane Distort, Internal Coefficients\n");
          (void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
          (void) FormatLocaleFile(stderr,
            "Cylinder to Plane Distort, FX Equivalent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n",
coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
          (void) FormatLocaleFile(stderr,
            "Plane to Cylinder Distort, Internal Coefficients\n");
          (void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
          (void) FormatLocaleFile(stderr,
            "Plane to Cylinder Distort, FX Equivalent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1],
coeff[2] );
(void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
double
xc,
yc;
/*
            NOTE: This does the barrel distortion in pixel coords, not image
            coords. The internal distortion must do it in image coordinates,
            so that is what the center coeff (8,9) is given in.
*/
xc=((double)image->columns-1.0)/2.0+image->page.x;
yc=((double)image->rows-1.0)/2.0+image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]-
0.5,coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr,
" ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2],
coeff[3]);
(void) FormatLocaleFile(stderr,
" jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6],
coeff[7]);
(void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n");
          break;
        }
default:
break;
}
}
  /*
    A user-provided 'scale' expert option scales the output image size by
    the factor given, allowing for super-sampling of the distorted image
    space. Any sampling scaling factors must naturally be adjusted by the
    reciprocal as a result.
  */
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
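  /* For example, "-set option:distort:scale 2" doubles the output canvas
     size for super-sampling; output_scaling then becomes 0.5 so that the
     sampling points (and the ScaleFilter derivative scaling below) still
     cover the same distortion space. */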
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid,
exception);
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
      validity; /* how mathematically valid the mapping is */
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel color to assign to distorted image */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
      /* Initialize default pixel validity
      *    negative:       pixel is invalid; output 'matte_color'
      *    0.0 to 1.0:     antialiased, mix with resample output
      *    1.0 or greater: use resampled output.
      */
validity = 1.0;
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
            /* Affine partial derivatives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
            /* Reverse mapped is just a simple polynomial */
            s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
            s.y=coeff[4]*d.x+coeff[5]*d.y
                +coeff[6]*d.x*d.y+coeff[7];
            /* Bilinear partial derivatives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
            /* Forward mapping needs the reversed polynomial equations,
             * which unfortunately require a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
            /* Handle special degenerate (non-quadratic) case
             * Currently without horizon anti-aliasing */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
            /* NOTE: the sign of the square root should be -ve for parts
                     where the source image becomes 'flipped' or 'mirrored'.
               FUTURE: Horizon handling
               FUTURE: Scaling factors or Derivatives (how?)
            */
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
            /* Arc Distortion Partial Scaling Vectors
               are derived by mapping the perpendicular unit vectors
               dR and dA*R*2PI rather than trying to map dx and dy.
               The result is a very simple orthogonally aligned ellipse.
            */
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
          { /* 2D Cartesian to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
            /* Polar Scaling vectors are based on mapping dR and dA vectors.
               This results in very simple orthogonal scaling vectors.
            */
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
          { /* 2D Polar to Cartesian */
            /* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
            /* derivatives are useless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
            /* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
          { /* 3D Tangential Plane to Cylinder */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
            /* is the pixel valid? - horizon of an infinite Virtual-Pixel
             * Plane (see Anthony Thyssen's personal notes) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
          { /* Lens Barrel Distortion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
          { /* Shepards Method, or Inverse Distance Weighting, for
               displacement around the destination image control points.
               The input arguments are the coefficients of the function.
               This is more of a 'displacement' function than an
               absolute distortion function.
               Note: We can not determine derivatives using Shepards method,
               so only a point sample interpolation can be used.
            */
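            /* Worked example of the weighting below: with the default
               coeff[0]=1.0 a control point 5 pixels away (squared distance
               25) contributes weight 1/25, while any point closer than one
               pixel is clamped to weight 1.0. */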
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
            s.x += d.x; /* make it a relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
  /* Arc does not return an offset unless 'bestfit' is in effect
     and the user has not provided an overriding 'viewport'.
  */
  if ( distort_image != (Image *) NULL &&
       method == ArcDistortion && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty triangles left
% over from shearing the image are filled with the background color defined
% by member 'background_color' of the image. RotateImage allocates the
% memory necessary for the new Image structure and returns a pointer to the
% new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
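#if 0
/* Illustrative usage sketch only (not compiled): rotate by 30 degrees.
   Angles that reduce to a multiple of 90 degrees take the fast
   IntegralRotateImage() path inside RotateImage() below.
*/
{
  Image
    *rotated;

  rotated=RotateImage(image,30.0,exception);
}
#endif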
MagickExport Image *RotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*distort_image,
*rotate_image;
double
angle;
PointInfo
shear;
size_t
rotations;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
angle=fmod(degrees,360.0);
while (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
return(IntegralRotateImage(image,rotations,exception));
distort_image=CloneImage(image,0,0,MagickTrue,exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
exception);
rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
°rees,MagickTrue,exception);
distort_image=DestroyImage(distort_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
%    The methods used for SparseColor() are often similar to methods
%    used for DistortImage(), and even share the same code for determination
%    of the function coefficients, though with more dimensions (or resulting
%    values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
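#if 0
/* Illustrative usage sketch only (not compiled): two control points with
   normalized RGB colors (number_colors is 3 for a plain sRGB image with no
   alpha, per the channel-trait checks below), filled using Voronoi (nearest
   control point) coloring.
*/
{
  double
    cp[10] = { 10,10, 1.0,0.0,0.0, 90,90, 0.0,0.0,1.0 };

  Image
    *gradient;

  gradient=SparseColorImage(image,VoronoiColorInterpolate,10,cp,exception);
}
#endif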
MagickExport Image *SparseColorImage(const Image *image,
const SparseColorMethod method,const size_t number_arguments,
const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
SparseColorMethod
sparse_method;
double
*coeff;
Image
*sparse_image;
size_t
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Determine number of color values needed per control point */
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
number_colors++;
  /*
    Convert input arguments into mapping coefficients; in this case
    we are mapping (distorting) colors, rather than coordinates.
  */
{ DistortMethod
distort_method;
distort_method=(DistortMethod) method;
if ( distort_method >= SentinelDistortion )
distort_method = ShepardsDistortion; /* Pretend to be Shepards */
coeff = GenerateCoefficients(image, &distort_method, number_arguments,
arguments, number_colors, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse color method. This also ensures
      correct two and one color Barycentric handling.
    */
sparse_method = (SparseColorMethod) distort_method;
if ( distort_method == ShepardsDistortion )
sparse_method = method; /* return non-distort methods to normal */
if ( sparse_method == InverseColorInterpolate )
coeff[0]=0.5; /* sqrt() the squared distance for inverse */
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
switch (sparse_method) {
case BarycentricColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
break;
}
default:
/* sparse color method is too complex for FX emulation */
break;
}
}
  /* Generate a new image for the generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy is: if the storage class could change, then
   * clone the image.
   */
sparse_image=CloneImage(image,0,0,MagickTrue,exception);
if (sparse_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
{ /* if image is ColorMapped - change it to DirectClass */
sparse_image=DestroyImage(sparse_image);
return((Image *) NULL);
}
{ /* ----- MAIN CODE ----- */
CacheView
*sparse_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
j;
status=MagickTrue;
progress=0;
sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
for (j=0; j < (ssize_t) sparse_image->rows; j++)
{
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel to assign to distorted image */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(sparse_image,&pixel);
for (i=0; i < (ssize_t) image->columns; i++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (sparse_method)
{
case BarycentricColorInterpolate:
{
register ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
break;
}
case InverseColorInterpolate:
case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance Weighted average (IDW) */
size_t
k;
double
denominator;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=0.0;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=0.0;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=0.0;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=0.0;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=0.0;
denominator = 0.0;
for(k=0; k<number_arguments; k+=2+number_colors) {
register ssize_t x=(ssize_t) k+2;
double weight =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
weight = pow(weight,coeff[0]); /* inverse of power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red += arguments[x++]*weight;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green += arguments[x++]*weight;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue += arguments[x++]*weight;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black += arguments[x++]*weight;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha += arguments[x++]*weight;
denominator += weight;
}
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red/=denominator;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green/=denominator;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue/=denominator;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black/=denominator;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha/=denominator;
break;
}
case ManhattanColorInterpolate:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
fabs((double)i-arguments[ k ])
+ fabs((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
case VoronoiColorInterpolate:
default:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for (k=0; k<number_arguments; k+=2+number_colors) {
double distance =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
}
/* set the computed color directly into the sparse image */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha);
SetPixelViaPixelInfo(sparse_image,&pixel,q);
q+=GetPixelChannels(sparse_image);
}
sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* serialize updates of the shared progress counter across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SparseColorImage)
#endif
proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sparse_view=DestroyCacheView(sparse_view);
if (status == MagickFalse)
sparse_image=DestroyImage(sparse_image);
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(sparse_image);
}
|
GB_binop__div_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__div_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__div_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__div_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_fp64)
// A*D function (colscale): GB (_AxD__div_fp64)
// D*A function (rowscale): GB (_DxB__div_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__div_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__div_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_fp64)
// C=scalar+B GB (_bind1st__div_fp64)
// C=scalar+B' GB (_bind1st_tran__div_fp64)
// C=A+scalar GB (_bind2nd__div_fp64)
// C=A'+scalar GB (_bind2nd_tran__div_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij / bij)
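// Hedged usage sketch (not part of the generated code): these kernels are
// selected internally when the built-in GrB_DIV_FP64 operator meets FP64
// matrices, e.g.
//   GrB_Matrix_new (&C, GrB_FP64, m, n) ;
//   GrB_eWiseMult (C, NULL, NULL, GrB_DIV_FP64, A, B, NULL) ; // cij = aij/bij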
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x / y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_FP64 || GxB_NO_DIV_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__div_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__div_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__div_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__div_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__div_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__div_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__div_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__div_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__div_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x / bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij / y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x / aij) ; \
}
GrB_Info GB (_bind1st_tran__div_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij / y) ; \
}
GrB_Info GB (_bind2nd_tran__div_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB097-target-teams-distribute-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#define min(x, y) (((x) < (y)) ? (x) : (y))
/*
Blocked dot-product reduction. Despite the file name (target + teams +
distribute), this variant runs on the host: the two-level blocking is
expressed with nested parallel-for loops, each carrying reduction(+:sum).
*/
int main(int argc, char* argv[])
{
int i, i2;
int len = 2560;
double sum =0.0, sum2=0.0;
double a[len], b[len];
/*Initialize with some values*/
#pragma omp parallel for private(i)
for (i=0; i<len; i++)
{
a[i]= ((double)i)/2.0;
b[i]= ((double)i)/3.0;
}
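/* Blocked dot product: the outer loop hands out 256-element chunks and the
   inner loop reduces within each chunk; both levels declare reduction(+:sum),
   so the partial sums combine without a data race. */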
#pragma omp parallel for private(i, i2) reduction (+:sum)
for (i2=0; i2< len; i2+=256)
#pragma omp parallel for private(i) reduction(+:sum)
for (i=i2;i< min(i2+256, len); i++)
sum += a[i]*b[i];
/* CPU reference computation */
#pragma omp parallel for private(i) reduction (+:sum2)
for (i=0;i< len; i++)
sum2 += a[i]*b[i];
printf ("sum=%f sum2=%f\n", sum, sum2);
return 0;
}
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda/utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
template<>
inline int get_num_threads<cpu>(const int N) {
return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
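// Hedged usage sketch (surrounding names illustrative): the switch turns the
// runtime request into a compile-time constant, so one kernel instantiation
// exists per request type, e.g.
//   MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
//     Kernel<op_with_req<OP, Req>, xpu>::Launch(
//         s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
//   });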
/*! \brief operator request type switch */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
{ \
const OpReqType ReqType = kNullOp; \
{__VA_ARGS__} \
} \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else if (NDim == 6) { \
const int ndim = 6; \
{__VA_ARGS__} \
} else if (NDim == 7) { \
const int ndim = 7; \
{__VA_ARGS__} \
} else if (NDim == 8) { \
const int ndim = 8; \
{__VA_ARGS__} \
} else if (NDim == 9) { \
const int ndim = 9; \
{__VA_ARGS__} \
} else if (NDim == 10) { \
const int ndim = 10; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
case mshadow::kBfloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_NO_BFLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kBfloat16: \
LOG(FATAL) << "This operation does not " \
"support bfloat16"; \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
template <typename T>
struct AccType {
using type = T;
};
template <>
struct AccType<mshadow::half::half_t> {
using type = float;
};
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not uint8"; \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not int8"; \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int32_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} \
break; \
case mshadow::kBool: \
{ \
typedef bool DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not bool"; \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kBool: \
{ \
typedef bool DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
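// Hedged usage sketch (variable names illustrative): each storage type DType
// is paired with a wider accumulation type AType (float -> double,
// uint8 -> uint32, ...), so reductions can accumulate in AType and cast back:
//   MXNET_ACC_TYPE_SWITCH(in.type_flag_, DType, AType, {
//     AType sum = 0;
//     for (index_t i = 0; i < n; ++i) sum += static_cast<AType>(data[i]);
//   });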
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kBool: \
{ \
typedef bool DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} \
break; \
case mshadow::kUint8: \
{ \
LOG(FATAL) << "This operation only support " \
"integer types, not uint8"; \
} \
break; \
case mshadow::kInt8: \
{ \
LOG(FATAL) << "This operation only support " \
"integer types, not int8"; \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kBool: \
{ \
LOG(FATAL) << "This operation only support " \
"integer types, not bool"; \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Invalid loading enum type " << type; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool)
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
}
return ret;
}
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
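// Worked example (row-major): with shape = (3, 4), coord = (1, 2) gives
// ravel = 1*4 + 2 = 6, and unravel(6, shape) recovers (1, 2). In ravel, the
// (shape[i] > coord[i]) guard zeroes the contribution of size-1 (broadcast)
// axes, so out-of-range coordinates on those axes map to offset 0.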
/* Compute dot product of two vectors */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret += coord[i] * stride[i];
}
return ret;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
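// Worked example: shape (3, 1, 4) yields stride (4, 0, 1); size-1 axes get
// stride 0, so broadcast dimensions contribute nothing to dot(coord, stride).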
/* Increment coordinates */
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
++(*coord)[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
}
return (*coord)[0] < shape[0];
}
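// Worked example: with shape (2, 4), inc advances (0, 3) -> (1, 0) by carrying
// the overflowed trailing coordinate into the next dimension; it returns false
// once coordinate 0 overflows, signalling the whole shape has been traversed.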
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
/*!
* \brief Simple copy data from one blob to another
* \param to Destination blob
* \param from Source blob
*/
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
} else {
MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
* \param a - output grad
* \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
* \return input grad
*/
template<typename DType, typename ...Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
template<typename OP, int req>
struct mixed_type_unary_op {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename OType, typename IType>
MSHADOW_XINLINE static void Map(index_t i, OType *out, const IType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(OType(in[i])));
}
};
/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
* Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
*/
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is tensor and two scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
const DType value_1, const DType value_2) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
}
/*! \brief No inputs (ie fill to constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
/*! \brief input is a tensor and the output is a boolean tensor */
template<typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors with a boolean output tensor */
template<typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and two scalar value with a boolean output tensor */
template<typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is two tensors with different type and with a boolean output tensor */
template<typename LType, typename RType,
typename std::enable_if<!std::is_same<LType, RType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool *out, const LType *lhs, const RType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief inputs are two tensors with a half_t output tensor */
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i,
mshadow::half::half_t *out,
const DType *lhs,
const mshadow::half::half_t *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief inputs are two tensors with a float output tensor */
template<typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief inputs are two tensors with a double output tensor */
template<typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_same<DType, float>::value ||
std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief inputs are two tensors with a half_t output tensor */
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i,
mshadow::half::half_t *out,
const DType *lhs,
const mshadow::half::half_t value) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
}
/*! \brief inputs are two tensors with a float output tensor */
template<typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
}
/*! \brief inputs are two tensors with a double output tensor */
template<typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_same<DType, float>::value ||
std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
}
/*! \brief inputs are two tensors with a float output tensor */
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is a tensor and a scalar value with a float output tensor */
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
};
template<typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template<typename OP>
struct Kernel<OP, cpu> {
/*!
* \brief Launch a generic CPU kernel.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
* for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
if (omp_threads < 2) {
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename PRIMITIVE_OP, typename DType, typename ...Args>
static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
N, static_cast<size_t>(omp_threads))) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
/*!
* \brief Launch a tunable OP with implicitly-supplied data type
* \tparam DType Data type
* \tparam T OP type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<T, DType>(s, N, dest, args...);
return true;
}
/*!
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
* \tparam DType Data type
* \tparam T Wrapper type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
return true;
}
};
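// Hedged usage sketch (operator and tensor names illustrative): a typical
// elementwise launch pairs an op_with_req wrapper with this CPU launcher:
//   using namespace mxnet_op;
//   Kernel<op_with_req<mshadow_op::plus, kWriteTo>, cpu>::Launch(
//       s, out.Size(), out.dptr<float>(), lhs.dptr<float>(), rhs.dptr<float>());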
#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
if (0 == N) return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
if (0 == N) return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
}
};
#endif // __CUDACC__
/*!
* \brief Set to immediate scalar value kernel
* \tparam val Scalar immediate
*/
template<int val>
struct set_to_int : public tunable {
// mxnet_op version (when used directly with Kernel<>::Launch())
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
out[i] = DType(val);
}
// mshadow_op version (when used with op_with_req<>)
MSHADOW_XINLINE static int Map() {
return val;
}
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
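// Hedged usage sketch: zero-fill an N-element buffer in parallel with
//   Kernel<set_zero, xpu>::Launch(s, N, out_ptr);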
/*!
* \brief Set to immediate scalar value kernel
* \tparam val Scalar immediate
*/
template<bool val>
struct set_to_bool : public tunable {
// mxnet_op version (when used directly with Kernel<>::Launch())
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
out[i] = DType(val);
}
// mshadow_op version (when used with op_with_req<>)
MSHADOW_XINLINE static int Map() {
return val;
}
};
/*!
* \brief Special-case kernel shortcut for setting to true and false
*/
using set_true = set_to_bool<true>;
using set_false = set_to_bool<false>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
omp-spawn-n-tasks.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
int main(int argc, char *argv[])
{
if(argc != 2)
{
fprintf(stderr, "Usage: %s nTasks\n", argv[0]);
exit(EXIT_FAILURE);
}
int nTasks = atoi(argv[1]);
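/* One thread (single nowait) creates the tasks; taskloop with grainsize(1)
   spawns one task per iteration, and nogroup omits the implicit taskgroup,
   so the creating thread does not wait for the tasks here. */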
#pragma omp parallel
#pragma omp single nowait
#pragma omp taskloop nogroup grainsize(1)
for (int i=0; i<nTasks; i++){
usleep(5);
}
return EXIT_SUCCESS;
} |
ex06.c | /* Copyright (c) 2021 ENCCS */
#include <stdio.h>
int main(void)
{
int x = 0;
#pragma omp target data map(tofrom:x)
{
/* check point 1: x was mapped tofrom, so the device copy starts at 0 */
x = 10; /* host code inside the data region: updates the host copy only */
/* check point 2: host x == 10, but the device copy is still 0 */
#pragma omp target update to(x)
/* check point 3: the update copied the host value, so the device copy is 10 */
}
return 0;
}
|
master.c | //=====================================================================
// MAIN FUNCTION
//=====================================================================
void master(fp timeinst, fp *initvalu, fp *parameter, fp *finavalu, int mode) {
//=====================================================================
// VARIABLES
//=====================================================================
// counters
int i;
// intermediate output on host
fp JCaDyad;
fp JCaSL;
fp JCaCyt;
// offset pointers
int initvalu_offset_batch; //
int initvalu_offset_ecc; // 46 points
int parameter_offset_ecc;
int initvalu_offset_Dyad; // 15 points
int parameter_offset_Dyad;
int initvalu_offset_SL; // 15 points
int parameter_offset_SL;
int initvalu_offset_Cyt; // 15 points
int parameter_offset_Cyt;
// module parameters
fp CaDyad; // from ECC model, *** Converting from [mM] to [uM] ***
fp CaSL; // from ECC model, *** Converting from [mM] to [uM] ***
fp CaCyt; // from ECC model, *** Converting from [mM] to [uM] ***
// thread counters
int th_id, nthreads;
int th_count[4];
int temp;
//=====================================================================
// KERNELS FOR 1 WORKLOAD - PARALLEL
//=====================================================================
nthreads = omp_get_max_threads();
if (mode == 0) {
// partition workload between threads
temp = 0;
for (i = 0; i < 4; i++) { // do for all 4 pieces of work
if (temp >= nthreads) { // limit according to number of threads
temp = 0;
}
th_count[i] = temp; // assign thread to piece of work
temp = temp + 1;
}
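// Worked example: with nthreads = 2 the assignment wraps around, giving
// th_count = {0, 1, 0, 1}, so each thread handles two of the four kernels.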
// run pieces of work in parallel
#pragma omp parallel private(th_id)
{
th_id = omp_get_thread_num(); // identify this thread to pick its piece of work
if (th_id == th_count[0]) {
// ecc function
initvalu_offset_ecc = 0; // 46 points
parameter_offset_ecc = 0;
ecc(timeinst, initvalu, initvalu_offset_ecc, parameter,
parameter_offset_ecc, finavalu);
}
if (th_id == th_count[1]) {
// cam function for Dyad
initvalu_offset_Dyad = 46; // 15 points
parameter_offset_Dyad = 1;
CaDyad =
initvalu[35] *
1e3; // from ECC model, *** Converting from [mM] to [uM] ***
JCaDyad =
cam(timeinst, initvalu, initvalu_offset_Dyad, parameter,
parameter_offset_Dyad, finavalu, CaDyad);
}
if (th_id == th_count[2]) {
// cam function for SL
initvalu_offset_SL = 61; // 15 points
parameter_offset_SL = 6;
CaSL =
initvalu[36] *
1e3; // from ECC model, *** Converting from [mM] to [uM] ***
JCaSL = cam(timeinst, initvalu, initvalu_offset_SL, parameter,
parameter_offset_SL, finavalu, CaSL);
}
if (th_id == th_count[3]) {
// cam function for Cyt
initvalu_offset_Cyt = 76; // 15 points
parameter_offset_Cyt = 11;
CaCyt =
initvalu[37] *
1e3; // from ECC model, *** Converting from [mM] to [uM] ***
JCaCyt = cam(timeinst, initvalu, initvalu_offset_Cyt, parameter,
parameter_offset_Cyt, finavalu, CaCyt);
}
}
}
//=====================================================================
// KERNELS FOR MANY WORKLOAD - SERIAL
//=====================================================================
else {
// ecc function
initvalu_offset_ecc = 0; // 46 points
parameter_offset_ecc = 0;
ecc(timeinst, initvalu, initvalu_offset_ecc, parameter,
parameter_offset_ecc, finavalu);
// cam function for Dyad
initvalu_offset_Dyad = 46; // 15 points
parameter_offset_Dyad = 1;
CaDyad = initvalu[35] *
1e3; // from ECC model, *** Converting from [mM] to [uM] ***
JCaDyad = cam(timeinst, initvalu, initvalu_offset_Dyad, parameter,
parameter_offset_Dyad, finavalu, CaDyad);
// cam function for SL
initvalu_offset_SL = 61; // 15 points
parameter_offset_SL = 6;
CaSL = initvalu[36] *
1e3; // from ECC model, *** Converting from [mM] to [uM] ***
JCaSL = cam(timeinst, initvalu, initvalu_offset_SL, parameter,
parameter_offset_SL, finavalu, CaSL);
// cam function for Cyt
initvalu_offset_Cyt = 76; // 15 points
parameter_offset_Cyt = 11;
CaCyt = initvalu[37] *
1e3; // from ECC model, *** Converting from [mM] to [uM] ***
JCaCyt = cam(timeinst, initvalu, initvalu_offset_Cyt, parameter,
parameter_offset_Cyt, finavalu, CaCyt);
}
//=====================================================================
// FINAL KERNEL
//=====================================================================
// final adjustments
fin(initvalu, initvalu_offset_ecc, initvalu_offset_Dyad, initvalu_offset_SL,
initvalu_offset_Cyt, parameter, finavalu, JCaDyad, JCaSL, JCaCyt);
//=====================================================================
// COMPENSATION FOR NANs and INFs
//=====================================================================
// make sure function does not return NANs and INFs
for (i = 0; i < EQUATIONS; i++) {
if (isnan(finavalu[i])) { // isnan returns nonzero for NaN, not necessarily 1
finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
} else if (isinf(finavalu[i])) {
finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
}
}
}
|
modal_analysis_builder_and_solver.h | /*
==============================================================================
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: janosch $
* Date: $Date: 2008-04-29 12:23:09 $
* Revision: $Revision: 1.1 $
*
* ***********************************************************/
#if !defined(KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER )
#define KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <omp.h>
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "linear_solvers/power_iteration_eigenvalue_solver.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
This class provides an implementation of the standard building and solving operations.
The RHS is constituted by the unbalanced loads (residual).
Degrees of freedom are reordered by placing the restrained degrees of freedom at
the end of the system, in reverse order with respect to the DofSet.
Imposition of the Dirichlet conditions is naturally dealt with, as the residual already contains
this information.
Calculation of the reactions involves a cost very similar to the calculation of the total residual.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace , //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ModalAnalysisBuilderAndSolver
: public BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
//typedef boost::shared_ptr< ModalAnalysisBuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver> > Pointer;
KRATOS_CLASS_POINTER_DEFINITION( ModalAnalysisBuilderAndSolver );
typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor.
*/
ModalAnalysisBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver)
{
/* std::cout << "using the standard builder and solver " << std::endl; */
}
/** Destructor.
*/
virtual ~ModalAnalysisBuilderAndSolver(){}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& b)
{
KRATOS_TRY
if(!pScheme)
KRATOS_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(BaseType::mReactionsVector);
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
double start_prod = omp_get_wtime();
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
#pragma omp critical
{
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
}
}
vector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];
// assemble all elements
for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
#pragma omp critical
{
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
}
}
}
double stop_prod = omp_get_wtime();
std::cout << "time: " << stop_prod - start_prod << std::endl;
KRATOS_WATCH("finished parallel building");
/* LHS_Contribution.resize(0,0,false);
RHS_Contribution.resize(0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
}
*/
//for( int i=0; i<A.size1(); i++ )
//{
// for( int j=0; j<A.size2(); j++ )
// {
// std::cout << A(i,j);
// }
// std::cout << std::endl;
//}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A)
{
KRATOS_TRY
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(BaseType::mReactionsVector);
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
LHS_Contribution.resize(0,0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A)
{
KRATOS_TRY
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(BaseType::mReactionsVector);
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
LHS_Contribution.resize(0,0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
double start_solve = omp_get_wtime();
double norm_b;
if(b.size() != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if(norm_b != 0.00)
BaseType::mpLinearSystemSolver->Solve(A,Dx,b);
else
TSparseSpace::SetToZero(Dx);
//prints information about the linear solver
if (BaseType::GetEchoLevel()>1)
{
std::cout << *(BaseType::mpLinearSystemSolver) << std::endl;
}
double stop_solve= omp_get_wtime();
std::cout << "time: " << stop_solve - start_solve << std::endl;
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildAndSolve( typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{
KRATOS_TRY
boost::timer building_time;
//construct mass matrix structure
TSystemMatrixType M = TSystemMatrixType( A.size1(), A.size2() );
//build matrices
BuildSystemMatrices( pScheme, r_model_part, A, M );
//elapsed time
if(BaseType::GetEchoLevel()>0)
{
std::cout << "Building Time : " << building_time.elapsed() << std::endl;
}
if (BaseType::GetEchoLevel()== 3)
{
std::cout << "before the solution of the system" << std::endl;
std::cout << "stiffness Matrix = " << A << std::endl;
std::cout << "mass Matrix = " << M << std::endl;
std::cout << "unknowns vector = " << Dx << std::endl;
std::cout << "RHS vector = " << b << std::endl;
}
boost::timer solve_time;
// SystemSolve(A,Dx,b);
PowerIterationEigenvalueSolver<TSparseSpace, TDenseSpace, TLinearSolver>
eigenvalue_solver( 1.0e-8, 1000, 1, BaseType::mpLinearSystemSolver );
LocalSystemVectorType Eigenvalues(1);
LocalSystemMatrixType Eigenvectors(1,1);
eigenvalue_solver.Solve( A, M, Eigenvalues, Eigenvectors);
if(BaseType::GetEchoLevel()>0)
{
std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl;
}
if (BaseType::GetEchoLevel()== 3)
{
std::cout << "after the solution of the system" << std::endl;
std::cout << "Eigenvalues = " << Eigenvalues << std::endl;
std::cout << "Eigenvectors = " << Eigenvectors << std::endl;
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
BuildRHS(pScheme,r_model_part,b);
SystemSolve(A,Dx,b);
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemVectorType& b)
{
KRATOS_TRY
//Getting the Elements
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(BaseType::mReactionsVector);
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
{
//calculate elemental Right Hand Side Contribution
pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b,RHS_Contribution,EquationId);
}
LHS_Contribution.resize(0,0,false);
RHS_Contribution.resize(0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b,RHS_Contribution,EquationId);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part )
{
KRATOS_TRY
KRATOS_WATCH("setting up the dofs");
//Gets the array of elements from the modeler
ElementsArrayType& pElements = r_model_part.Elements();
Element::DofsVectorType ElementalDofList;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
//mDofSet.clear();
//double StartTime = GetTickCount();
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin();
it!=pElements.ptr_end(); ++it)
{
// gets list of Dof involved on every element
pScheme->GetElementalDofList(*it,ElementalDofList,CurrentProcessInfo);
for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ;
i != ElementalDofList.end() ; ++i)
{
Doftemp.push_back(*i);
//mDofSet.push_back(*i);
}
}
//taking conditions into account
ConditionsArrayType& pConditions = r_model_part.Conditions();
for (typename ConditionsArrayType::ptr_iterator it=pConditions.ptr_begin();
it!=pConditions.ptr_end(); ++it)
{
// gets list of Dof involved on every element
pScheme->GetConditionDofList(*it,ElementalDofList,CurrentProcessInfo);
for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ;
i != ElementalDofList.end() ; ++i)
{
//mDofSet.push_back(*i);
Doftemp.push_back(*i);
}
}
Doftemp.Unique();
BaseType::mDofSet = Doftemp;
//throws an exception if there are no degrees of freedom involved in the analysis
if (BaseType::mDofSet.size()==0)
KRATOS_ERROR(std::logic_error, "No degrees of freedom!", "");
BaseType::mDofSetIsInitialized = true;
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void SetUpSystem(
ModelPart& r_model_part
)
{
// Set equation id for degrees of freedom
// the free degrees of freedom are positioned at the beginning of the system,
// while the fixed ones are at the end (in reverse order).
//
// that means that if the EquationId is greater than "mEquationSystemSize"
// the pointed degree of freedom is restrained
//
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
if (dof_iterator->IsFixed())
dof_iterator->SetEquationId(--fix_id);
else
dof_iterator->SetEquationId(free_id++);
BaseType::mEquationSystemSize = fix_id;
}
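// Worked example (illustrative): with mDofSet = {d0,d1,d2,d3,d4} where d1 and
// d3 are fixed, the loop assigns d0->0, d2->1, d4->2 (free, counting up) and
// d1->4, d3->3 (fixed, counting down), so mEquationSystemSize = 3 and any
// EquationId >= 3 denotes a restrained degree of freedom.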
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
ElementsArrayType& rElements,
ConditionsArrayType& rConditions,
ProcessInfo& CurrentProcessInfo
)
{
KRATOS_TRY
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false);
ConstructMatrixStructure(A,rElements,rConditions,CurrentProcessInfo);
}
else
{
if(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true);
ConstructMatrixStructure(A,rElements,rConditions,CurrentProcessInfo);
}
}
if(Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize,false);
if(b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize,false);
//
//if needed resize the vector for the calculation of reactions
if(BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size()-BaseType::mEquationSystemSize;
if(BaseType::mReactionsVector.size() != ReactionsVectorSize)
BaseType::mReactionsVector.resize(ReactionsVectorSize,false);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void InitializeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void FinalizeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
}
//**************************************************************************
//**************************************************************************
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
//refresh RHS to have the correct reactions
BuildRHS(pScheme,r_model_part,b);
int i;
int systemsize = BaseType::mDofSet.size() - BaseType::mReactionsVector.size();
typename DofsArrayType::ptr_iterator it2;
//std::set<Dof::Pointer,ComparePDof>::iterator it2;
//updating variables
//for (it2=mDofSet.begin();it2 != mDofSet.end(); ++it2)
for (it2=BaseType::mDofSet.ptr_begin();it2 != BaseType::mDofSet.ptr_end(); ++it2)
{
if ( (*it2)->IsFixed() )
{
i=(*it2)->EquationId();
i-=systemsize;
(*it2)->GetSolutionStepReactionValue() = BaseType::mReactionsVector[i];
}
}
}
//**************************************************************************
//**************************************************************************
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{}
//**************************************************************************
//**************************************************************************
void ApplyPointLoads(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemVectorType& b)
{}
/**
this function is intended to be called at the end of the solution step to clean up
memory storage that is no longer needed
*/
void Clear()
{
this->mDofSet = DofsArrayType();
this->mReactionsVector = TSystemVectorType();
if (this->GetEchoLevel()>0)
{
KRATOS_WATCH("ModalAnalysisBuilderAndSolver Clear Function called");
}
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
//**************************************************************************
virtual void ConstructMatrixStructure(
TSystemMatrixType& A,
ElementsContainerType& rElements,
ConditionsArrayType& rConditions,
ProcessInfo& CurrentProcessInfo)
{
std::size_t equation_size = A.size1();
std::vector<std::vector<std::size_t> > indices(equation_size);
// std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix));
Element::EquationIdVectorType ids(3,0);
for(typename ElementsContainerType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; i_element++)
{
(i_element)->EquationIdVector(ids, CurrentProcessInfo);
for(std::size_t i = 0 ; i < ids.size() ; i++)
if(ids[i] < equation_size)
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for(std::size_t j = 0 ; j < ids.size() ; j++)
if(ids[j] < equation_size)
{
AddUnique(row_indices,ids[j]);
//indices[ids[i]].push_back(ids[j]);
}
}
}
for(typename ConditionsArrayType::iterator i_condition = rConditions.begin() ; i_condition != rConditions.end() ; i_condition++)
{
(i_condition)->EquationIdVector(ids, CurrentProcessInfo);
for(std::size_t i = 0 ; i < ids.size() ; i++)
if(ids[i] < equation_size)
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for(std::size_t j = 0 ; j < ids.size() ; j++)
if(ids[j] < equation_size)
{
AddUnique(row_indices,ids[j]);
// indices[ids[i]].push_back(ids[j]);
}
}
}
//allocating the memory needed
int data_size = 0;
for(std::size_t i = 0 ; i < indices.size() ; i++)
{
data_size += indices[i].size();
}
A.reserve(data_size,false);
//filling with zero the matrix (creating the structure)
for(std::size_t i = 0 ; i < indices.size() ; i++)
{
std::vector<std::size_t>& row_indices = indices[i];
std::sort(row_indices.begin(), row_indices.end());
for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
{
A.push_back(i,*it,0.00);
// A()(i,*it) = 0.00;
}
//row_indices = std::vector<std::size_t>();
row_indices.clear();
}
}
//**************************************************************************
void AssembleLHS(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local=0; i_local<local_size; i_local++)
{
unsigned int i_global=EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize )
{
for (unsigned int j_local=0; j_local<local_size; j_local++)
{
unsigned int j_global=EquationId[j_local];
if ( j_global < BaseType::mEquationSystemSize )
{
A(i_global,j_global) += LHS_Contribution(i_local,j_local);
}
}
}
}
}
//**************************************************************************
void AssembleRHS(
TSystemVectorType& b,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag==false) //if we don't need to calculate reactions
{
for (unsigned int i_local=0; i_local<local_size; i_local++)
{
unsigned int i_global=EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs
{ // ASSEMBLING THE SYSTEM VECTOR
b[i_global] += RHS_Contribution[i_local];
}
}
}
else //when the calculation of reactions is needed
{
for (unsigned int i_local=0; i_local<local_size; i_local++)
{
unsigned int i_global=EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs
{ // ASSEMBLING THE SYSTEM VECTOR
b[i_global] += RHS_Contribution[i_local];
}
else //on "fixed" DOFs
{ // Assembling the Vector of REACTIONS
BaseType::mReactionsVector[i_global-BaseType::mEquationSystemSize] -= RHS_Contribution[i_local];
}
}
}
}
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
//**************************************************************************
//**************************************************************************
void BuildSystemMatrices(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& K,
TSystemMatrixType& M )
{
KRATOS_TRY
if(!pScheme)
KRATOS_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(BaseType::mReactionsVector);
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
double start_prod = omp_get_wtime();
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType K_Contribution = LocalSystemMatrixType(0,0);
LocalSystemMatrixType M_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator
it_begin=pElements.ptr_begin()+element_partition[k];
typename ElementsArrayType::ptr_iterator
it_end=pElements.ptr_begin()+element_partition[k+1];
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
(*it)->MassMatrix( M_Contribution, CurrentProcessInfo );
(*it)->CalculateLocalSystem( K_Contribution,RHS_Contribution,CurrentProcessInfo );
//retrieve the dof ids: without this the assembly below would read an
//empty EquationId vector
(*it)->EquationIdVector( EquationId, CurrentProcessInfo );
#pragma omp critical
{
//assemble the elemental contribution
AssembleLHS(K,K_Contribution,EquationId);
AssembleLHS(M,M_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
}
}
vector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType K_Contribution = LocalSystemMatrixType(0,0);
LocalSystemMatrixType M_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator
it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
typename ConditionsArrayType::ptr_iterator
it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
(*it)->MassMatrix( M_Contribution, CurrentProcessInfo );
(*it)->CalculateLocalSystem( K_Contribution,RHS_Contribution,CurrentProcessInfo );
//retrieve the dof ids: without this the assembly below would read an
//empty EquationId vector
(*it)->EquationIdVector( EquationId, CurrentProcessInfo );
#pragma omp critical
{
//assemble the elemental contribution
AssembleLHS(K,K_Contribution,EquationId);
AssembleLHS(M,M_Contribution,EquationId);
}
}
}
double stop_prod = omp_get_wtime();
std::cout << "time: " << stop_prod - start_prod << std::endl;
KRATOS_WATCH("finished parallel building");
KRATOS_CATCH("")
}
//**************************************************************************
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local=0; i_local<local_size; i_local++)
{
unsigned int i_global=EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize )
{
for (unsigned int j_local=0; j_local<local_size; j_local++)
{
unsigned int j_global=EquationId[j_local];
A(i_global,j_global) += LHS_Contribution(i_local,j_local);
}
}
}
}
//******************************************************************************************
//******************************************************************************************
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while ( i != endit && (*i) != candidate)
{
i++;
}
if( i == endit )
{
v.push_back(candidate);
}
}
//******************************************************************************************
//******************************************************************************************
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions)
{
partitions.resize(number_of_threads+1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(unsigned int i = 1; i<number_of_threads; i++)
partitions[i] = partitions[i-1] + partition_size ;
}
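// Worked example (illustrative): CreatePartition(3, 10, p) yields
// p = {0, 3, 6, 10}; threads 0 and 1 each take 3 rows and the last
// thread absorbs the remainder (rows 6..9).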
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class ModalAnalysisBuilderAndSolver */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER defined */
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
# if defined(MAGICKCORE_WINDOWS_SUPPORT)
# if !defined(__MINGW32__)
# include <win32config.h>
# endif
# endif
# include <libxml/parser.h>
# include <libxml/tree.h>
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
struct _ProfileInfo
{
char
*name;
size_t
length;
unsigned char
*info;
size_t
signature;
};
typedef struct _CMSExceptionInfo
{
Image
*image;
ExceptionInfo
*exception;
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
const Image *clone_image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clone_image != (const Image *) NULL);
assert(clone_image->signature == MagickCoreSignature);
if (clone_image->profiles != (void *) NULL)
{
if (image->profiles != (void *) NULL)
DestroyImageProfiles(image);
image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
(void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
}
return(MagickTrue);
}
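/*
  Usage sketch (illustrative, not part of the original source): carry the
  profiles of a source image over to a derived image, e.g. a resized copy,
  so ICC/IPTC metadata survives the operation.  Both images are assumed
  valid; "resized" is a hypothetical Image * produced elsewhere.
*/
static MagickBooleanType PreserveProfiles(Image *resized,const Image *source)
{
  return(CloneImageProfiles(resized,source));
}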
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return(MagickFalse);
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
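/*
  Usage sketch (illustrative, not part of the original source): scrub
  metadata-bearing profiles before publishing an image.  The profile names
  follow the conventions used elsewhere in MagickCore ("exif", "xmp", ...).
*/
static void ScrubSensitiveProfiles(Image *image)
{
  (void) DeleteImageProfile(image,"exif");
  (void) DeleteImageProfile(image,"xmp");
}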
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
if (image->profiles != (SplayTreeInfo *) NULL)
image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
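/*
  Usage sketch (illustrative, not part of the original source): query the
  ICC profile and report its length.  GetStringInfoLength() is the
  MagickCore accessor for a StringInfo payload.
*/
static void ReportICCProfile(const Image *image)
{
  const StringInfo
    *profile;

  profile=GetImageProfile(image,"icc");
  if (profile == (const StringInfo *) NULL)
    (void) fprintf(stdout,"no icc profile\n");
  else
    (void) fprintf(stdout,"icc profile: %.20g bytes\n",
      (double) GetStringInfoLength(profile));
}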
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
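/*
  Usage sketch (illustrative, not part of the original source): enumerate
  all profile names attached to an image.  ResetImageProfileIterator() is
  the companion MagickCore call, declared in MagickCore/profile.h, that
  rewinds the internal splay-tree iterator.
*/
static void ListImageProfiles(const Image *image)
{
  const char
    *name;

  ResetImageProfileIterator(image);
  for (name=GetNextImageProfile(image); name != (const char *) NULL;
       name=GetNextImageProfile(image))
    (void) fprintf(stdout,"profile: %s\n",name);
}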
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with, to, or from an image.  If the profile is NULL, it is removed
% from the image; otherwise it is added or applied.  Use a name of '*' and a
% profile of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: if the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed.  Otherwise, the colorspace
% transform defined by the existing and new profiles is applied to the image
% pixels, and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
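/*
  Usage sketch (illustrative, not part of the original source; it follows
  the signature documented above): strip all existing profiles, then attach
  a new ICC profile supplied in a raw buffer.  "icc_datum" and "icc_length"
  are hypothetical placeholders for caller-provided data.
*/
static MagickBooleanType ReplaceICCProfile(Image *image,const void *icc_datum,
  const size_t icc_length)
{
  if (ProfileImage(image,"*",(const void *) NULL,0,MagickFalse) == MagickFalse)
    return(MagickFalse);
  return(ProfileImage(image,"icc",icc_datum,icc_length,MagickFalse));
}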
#if defined(MAGICKCORE_LCMS_DELEGATE)
typedef struct _LCMSInfo
{
ColorspaceType
colorspace;
cmsUInt32Number
type;
size_t
channels;
cmsHPROFILE
profile;
int
intent;
double
scale,
translate;
void
**magick_restrict pixels;
} LCMSInfo;
#if LCMS_VERSION < 2060
static void* cmsGetContextUserData(cmsContext ContextID)
{
return(ContextID);
}
static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
magick_unreferenced(Plugin);
return((cmsContext) UserData);
}
static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
cmsLogErrorHandlerFunction Fn)
{
magick_unreferenced(ContextID);
cmsSetLogErrorHandler(Fn);
}
static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
magick_unreferenced(ContextID);
}
#endif
static void **DestroyPixelThreadSet(void **pixels)
{
ssize_t
i;
if (pixels == (void **) NULL)
return((void **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (void *) NULL)
pixels[i]=RelinquishMagickMemory(pixels[i]);
pixels=(void **) RelinquishMagickMemory(pixels);
return(pixels);
}
static void **AcquirePixelThreadSet(const size_t columns,
const size_t channels,MagickBooleanType highres)
{
ssize_t
i;
size_t
number_threads;
size_t
size;
void
**pixels;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
if (pixels == (void **) NULL)
return((void **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
size=sizeof(double);
if (highres == MagickFalse)
size=sizeof(Quantum);
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=AcquireQuantumMemory(columns,channels*size);
if (pixels[i] == (void *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
ssize_t
i;
assert(transform != (cmsHTRANSFORM *) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (transform[i] != (cmsHTRANSFORM) NULL)
cmsDeleteTransform(transform[i]);
transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
return(transform);
}
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
const LCMSInfo *target_info,const cmsUInt32Number flags,
cmsContext cms_context)
{
cmsHTRANSFORM
*transform;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
sizeof(*transform));
if (transform == (cmsHTRANSFORM *) NULL)
return((cmsHTRANSFORM *) NULL);
(void) memset(transform,0,number_threads*sizeof(*transform));
for (i=0; i < (ssize_t) number_threads; i++)
{
transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile,
source_info->type,target_info->profile,target_info->type,
target_info->intent,flags);
if (transform[i] == (cmsHTRANSFORM) NULL)
return(DestroyTransformThreadSet(transform));
}
return(transform);
}
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
const char *message)
{
CMSExceptionInfo
*cms_exception;
ExceptionInfo
*exception;
Image
*image;
cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context);
if (cms_exception == (CMSExceptionInfo *) NULL)
return;
exception=cms_exception->exception;
if (exception == (ExceptionInfo *) NULL)
return;
image=cms_exception->image;
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"UnableToTransformColorspace","`%s'","unknown context");
return;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
severity,message != (char *) NULL ? message : "no message");
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"UnableToTransformColorspace","`%s', %s (#%u)",image->filename,
message != (char *) NULL ? message : "no message",severity);
}
static void TransformDoublePixels(const int id,const Image* image,
const LCMSInfo *source_info,const LCMSInfo *target_info,
const cmsHTRANSFORM *transform,Quantum *q)
{
#define GetLCMSPixel(source_info,pixel) \
(source_info->scale*QuantumScale*(pixel)+source_info->translate)
#define SetLCMSPixel(target_info,pixel) \
ClampToQuantum(target_info->scale*QuantumRange*(pixel)+target_info->translate)
double
*p;
ssize_t
x;
p=(double *) source_info->pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
*p++=GetLCMSPixel(source_info,GetPixelRed(image,q));
if (source_info->channels > 1)
{
*p++=GetLCMSPixel(source_info,GetPixelGreen(image,q));
*p++=GetLCMSPixel(source_info,GetPixelBlue(image,q));
}
if (source_info->channels > 3)
*p++=GetLCMSPixel(source_info,GetPixelBlack(image,q));
q+=GetPixelChannels(image);
}
cmsDoTransform(transform[id],source_info->pixels[id],
target_info->pixels[id],(unsigned int) image->columns);
p=(double *) target_info->pixels[id];
q-=GetPixelChannels(image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (target_info->channels == 1)
SetPixelGray(image,SetLCMSPixel(target_info,*p),q);
else
SetPixelRed(image,SetLCMSPixel(target_info,*p),q);
p++;
if (target_info->channels > 1)
{
SetPixelGreen(image,SetLCMSPixel(target_info,*p),q);
p++;
SetPixelBlue(image,SetLCMSPixel(target_info,*p),q);
p++;
}
if (target_info->channels > 3)
{
SetPixelBlack(image,SetLCMSPixel(target_info,*p),q);
p++;
}
q+=GetPixelChannels(image);
}
}
static void TransformQuantumPixels(const int id,const Image* image,
const LCMSInfo *source_info,const LCMSInfo *target_info,
const cmsHTRANSFORM *transform,Quantum *q)
{
Quantum
*p;
ssize_t
x;
p=(Quantum *) source_info->pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
*p++=GetPixelRed(image,q);
if (source_info->channels > 1)
{
*p++=GetPixelGreen(image,q);
*p++=GetPixelBlue(image,q);
}
if (source_info->channels > 3)
*p++=GetPixelBlack(image,q);
q+=GetPixelChannels(image);
}
cmsDoTransform(transform[id],source_info->pixels[id],
target_info->pixels[id],(unsigned int) image->columns);
p=(Quantum *) target_info->pixels[id];
q-=GetPixelChannels(image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (target_info->channels == 1)
SetPixelGray(image,*p++,q);
else
SetPixelRed(image,*p++,q);
if (target_info->channels > 1)
{
SetPixelGreen(image,*p++,q);
SetPixelBlue(image,*p++,q);
}
if (target_info->channels > 3)
SetPixelBlack(image,*p++,q);
q+=GetPixelChannels(image);
}
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image,
ExceptionInfo *exception)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
return(status);
}
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag "Profile/Image"
#ifndef TYPE_XYZ_8
#define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1))
#endif
#define ThrowProfileException(severity,tag,context) \
{ \
if (profile != (StringInfo *) NULL) \
profile=DestroyStringInfo(profile); \
if (cms_context != (cmsContext) NULL) \
cmsDeleteContext(cms_context); \
if (source_info.profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(source_info.profile); \
if (target_info.profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(target_info.profile); \
ThrowBinaryException(severity,tag,context); \
}
MagickBooleanType
status;
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(name != (const char *) NULL);
if ((datum == (const void *) NULL) || (length == 0))
{
char
*next;
/*
Delete image profile(s).
*/
ResetImageProfileIterator(image);
for (next=GetNextImageProfile(image); next != (const char *) NULL; )
{
if (IsOptionMember(next,name) != MagickFalse)
{
(void) DeleteImageProfile(image,next);
ResetImageProfileIterator(image);
}
next=GetNextImageProfile(image);
}
return(MagickTrue);
}
/*
Add an ICC, IPTC, or generic profile to the image.
*/
status=MagickTrue;
profile=AcquireStringInfo((size_t) length);
SetStringInfoDatum(profile,(unsigned char *) datum);
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
status=SetImageProfile(image,name,profile,exception);
else
{
const StringInfo
*icc_profile;
icc_profile=GetImageProfile(image,"icc");
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
const char
*value;
value=GetImageProperty(image,"exif:ColorSpace",exception);
(void) value;
if (LocaleCompare(value,"1") != 0)
(void) SetsRGBImageProfile(image,exception);
value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
if (LocaleCompare(value,"R98.") != 0)
(void) SetsRGBImageProfile(image,exception);
icc_profile=GetImageProfile(image,"icc");
}
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
profile=DestroyStringInfo(profile);
return(MagickTrue);
}
#if !defined(MAGICKCORE_LCMS_DELEGATE)
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (LCMS)",image->filename);
#else
{
cmsContext
cms_context;
CMSExceptionInfo
cms_exception;
LCMSInfo
source_info,
target_info;
/*
Transform pixel colors as defined by the color profiles.
*/
cms_exception.image=image;
cms_exception.exception=exception;
cms_context=cmsCreateContext(NULL,&cms_exception);
if (cms_context == (cmsContext) NULL)
ThrowBinaryException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler);
source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
GetStringInfoDatum(profile),(cmsUInt32Number)
GetStringInfoLength(profile));
if (source_info.profile == (cmsHPROFILE) NULL)
{
cmsDeleteContext(cms_context);
ThrowBinaryException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) &&
(icc_profile == (StringInfo *) NULL))
status=SetImageProfile(image,name,profile,exception);
else
{
CacheView
*image_view;
cmsColorSpaceSignature
signature;
cmsHTRANSFORM
*magick_restrict transform;
cmsUInt32Number
flags;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
const char
*artifact;
#endif
MagickBooleanType
highres;
MagickOffsetType
progress;
ssize_t
y;
target_info.profile=(cmsHPROFILE) NULL;
if (icc_profile != (StringInfo *) NULL)
{
target_info.profile=source_info.profile;
source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
GetStringInfoDatum(icc_profile),
(cmsUInt32Number) GetStringInfoLength(icc_profile));
if (source_info.profile == (cmsHPROFILE) NULL)
ThrowProfileException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
highres=MagickTrue;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
artifact=GetImageArtifact(image,"profile:highres-transform");
if (IsStringFalse(artifact) != MagickFalse)
highres=MagickFalse;
#endif
source_info.scale=1.0;
source_info.translate=0.0;
source_info.colorspace=sRGBColorspace;
source_info.channels=3;
switch (cmsGetColorSpace(source_info.profile))
{
case cmsSigCmykData:
{
source_info.colorspace=CMYKColorspace;
source_info.channels=4;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_CMYK_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_CMYK_16;
else
#endif
{
source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
source_info.scale=100.0;
}
break;
}
case cmsSigGrayData:
{
source_info.colorspace=GRAYColorspace;
source_info.channels=1;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_GRAY_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_GRAY_16;
else
#endif
source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
break;
}
case cmsSigLabData:
{
source_info.colorspace=LabColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_Lab_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_Lab_16;
else
#endif
{
source_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
source_info.scale=100.0;
source_info.translate=(-0.5);
}
break;
}
case cmsSigRgbData:
{
source_info.colorspace=sRGBColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_RGB_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_RGB_16;
else
#endif
source_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
case cmsSigXYZData:
{
source_info.colorspace=XYZColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_XYZ_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_XYZ_16;
else
#endif
source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
break;
}
default:
ThrowProfileException(ImageError,
"ColorspaceColorProfileMismatch",name);
}
signature=cmsGetPCS(source_info.profile);
if (target_info.profile != (cmsHPROFILE) NULL)
signature=cmsGetColorSpace(target_info.profile);
target_info.scale=1.0;
target_info.translate=0.0;
target_info.channels=3;
switch (signature)
{
case cmsSigCmykData:
{
target_info.colorspace=CMYKColorspace;
target_info.channels=4;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_CMYK_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_CMYK_16;
else
#endif
{
target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
target_info.scale=0.01;
}
break;
}
case cmsSigGrayData:
{
target_info.colorspace=GRAYColorspace;
target_info.channels=1;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_GRAY_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_GRAY_16;
else
#endif
target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
break;
}
case cmsSigLabData:
{
target_info.colorspace=LabColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_Lab_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_Lab_16;
else
#endif
{
target_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
target_info.scale=0.01;
target_info.translate=0.5;
}
break;
}
case cmsSigRgbData:
{
target_info.colorspace=sRGBColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_RGB_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_RGB_16;
else
#endif
target_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
case cmsSigXYZData:
{
target_info.colorspace=XYZColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_XYZ_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_XYZ_16;
else
#endif
target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
break;
}
default:
ThrowProfileException(ImageError,
"ColorspaceColorProfileMismatch",name);
}
switch (image->rendering_intent)
{
case AbsoluteIntent:
{
target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
break;
}
case PerceptualIntent:
{
target_info.intent=INTENT_PERCEPTUAL;
break;
}
case RelativeIntent:
{
target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
break;
}
case SaturationIntent:
{
target_info.intent=INTENT_SATURATION;
break;
}
default:
{
target_info.intent=INTENT_PERCEPTUAL;
break;
}
}
flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
if (image->black_point_compensation != MagickFalse)
flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
transform=AcquireTransformThreadSet(&source_info,&target_info,
flags,cms_context);
if (transform == (cmsHTRANSFORM *) NULL)
ThrowProfileException(ImageError,"UnableToCreateColorTransform",
name);
/*
Transform image as dictated by the source & target image profiles.
*/
source_info.pixels=AcquirePixelThreadSet(image->columns,
source_info.channels,highres);
target_info.pixels=AcquirePixelThreadSet(image->columns,
target_info.channels,highres);
if ((source_info.pixels == (void **) NULL) ||
(target_info.pixels == (void **) NULL))
{
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
ThrowProfileException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
if (source_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(source_info.profile);
if (target_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_info.profile);
return(MagickFalse);
}
if (target_info.colorspace == CMYKColorspace)
(void) SetImageColorspace(image,target_info.colorspace,exception);
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (highres != MagickFalse)
TransformDoublePixels(id,image,&source_info,&target_info,transform,q);
else
TransformQuantumPixels(id,image,&source_info,&target_info,transform,q);
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ProfileImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) SetImageColorspace(image,target_info.colorspace,exception);
switch (signature)
{
case cmsSigRgbData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
TrueColorType : TrueColorAlphaType;
break;
}
case cmsSigCmykData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
break;
}
case cmsSigGrayData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
GrayscaleType : GrayscaleAlphaType;
break;
}
default:
break;
}
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
if ((status != MagickFalse) &&
(cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
status=SetImageProfile(image,name,profile,exception);
if (target_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_info.profile);
}
(void) cmsCloseProfile(source_info.profile);
cmsDeleteContext(cms_context);
}
#endif
}
profile=DestroyStringInfo(profile);
return(status);
}
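/*
  Illustrative usage sketch (not part of MagickCore; the file names are
  hypothetical and FileToBlob() is assumed from MagickCore's blob API).
  ProfileImage() with an "icc"/"icm" profile converts the pixels through
  LCMS before attaching the profile, any other name simply attaches the
  payload, and a NULL datum (or zero length) deletes the matching
  profile(s) instead:

    ExceptionInfo *exception = AcquireExceptionInfo();
    ImageInfo *image_info = AcquireImageInfo();
    (void) CopyMagickString(image_info->filename, "input.png",
      MagickPathExtent);
    Image *image = ReadImage(image_info, exception);
    size_t length = 0;
    void *datum = FileToBlob("target.icc", ~0UL, &length, exception);
    if ((image != (Image *) NULL) && (datum != (void *) NULL))
      (void) ProfileImage(image, "icc", datum, length, exception);
*/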
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% StringInfo *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% void ResetImageProfileIterator(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return;
ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
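/*
  Illustrative sketch (not part of MagickCore): enumerating every profile
  attached to an image with the iterator pair described above.

    const char *name;
    ResetImageProfileIterator(image);
    for (name = GetNextImageProfile(image); name != (const char *) NULL; )
    {
      const StringInfo *profile = GetImageProfile(image, name);
      (void) fprintf(stdout, "%s: %.20g bytes\n", name,
        (double) GetStringInfoLength(profile));
      name = GetNextImageProfile(image);
    }
*/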
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
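/*
  Photoshop resource blocks store multi-byte values in big-endian order; the
  helpers below read and write them one byte at a time to stay alignment-
  and endian-neutral.
*/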
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
unsigned char *quantum)
{
*quantum=(*p++);
return(p);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
unsigned int *quantum)
{
*quantum=(unsigned int) (*p++) << 24;
*quantum|=(unsigned int) (*p++) << 16;
*quantum|=(unsigned int) (*p++) << 8;
*quantum|=(unsigned int) (*p++);
return(p);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
unsigned short *quantum)
{
*quantum=(unsigned short) (*p++) << 8;
*quantum|=(unsigned short) (*p++);
return(p);
}
static inline void WriteResourceLong(unsigned char *p,
const unsigned int quantum)
{
unsigned char
buffer[4];
buffer[0]=(unsigned char) (quantum >> 24);
buffer[1]=(unsigned char) (quantum >> 16);
buffer[2]=(unsigned char) (quantum >> 8);
buffer[3]=(unsigned char) quantum;
(void) memcpy(p,buffer,4);
}
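/*
  WriteTo8BimProfile() rewrites (or, when profile is NULL, removes) the
  payload of one record inside the image's "8bim" resource block.  Each
  record, as traversed below, is laid out as:

    4 bytes   "8BIM" signature
    2 bytes   resource id (big endian), e.g. 0x040f for icc
    1 byte    name length, followed by the name, padded to an even total
    4 bytes   payload length (big endian)
    n bytes   payload, padded to an even length
*/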
static void WriteTo8BimProfile(Image *image,const char *name,
const StringInfo *profile)
{
const unsigned char
*datum,
*q;
const unsigned char
*p;
size_t
length;
StringInfo
*profile_8bim;
ssize_t
count;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id,
profile_id;
if (LocaleCompare(name,"icc") == 0)
profile_id=0x040f;
else
if (LocaleCompare(name,"iptc") == 0)
profile_id=0x0404;
else
if (LocaleCompare(name,"xmp") == 0)
profile_id=0x0424;
else
return;
profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,"8bim");
if (profile_8bim == (StringInfo *) NULL)
return;
datum=GetStringInfoDatum(profile_8bim);
length=GetStringInfoLength(profile_8bim);
for (p=datum; p < (datum+length-16); )
{
q=p;
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((count & 0x01) != 0)
count++;
if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
break;
if (id != profile_id)
p+=count;
else
{
size_t
extent,
offset;
ssize_t
extract_extent;
StringInfo
*extract_profile;
extract_extent=0;
extent=(datum+length)-(p+count);
if (profile == (StringInfo *) NULL)
{
offset=(q-datum);
extract_profile=AcquireStringInfo(offset+extent);
(void) memcpy(extract_profile->datum,datum,offset);
}
else
{
offset=(p-datum);
extract_extent=profile->length;
if ((extract_extent & 0x01) != 0)
extract_extent++;
extract_profile=AcquireStringInfo(offset+extract_extent+extent);
(void) memcpy(extract_profile->datum,datum,offset-4);
WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
profile->length);
(void) memcpy(extract_profile->datum+offset,
profile->datum,profile->length);
}
(void) memcpy(extract_profile->datum+offset+extract_extent,
p+count,extent);
(void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString("8bim"),CloneStringInfo(extract_profile));
extract_profile=DestroyStringInfo(extract_profile);
break;
}
}
}
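/*
  GetProfilesFromResourceBlock() walks a Photoshop resource block and
  registers the embedded payloads it recognizes as first-class image
  profiles: 0x03ed (resolution), 0x0404 (IPTC), 0x040f (ICC), 0x0422 (EXIF),
  and 0x0424 (XMP).  Thumbnails (0x040c) and unknown ids are skipped.
*/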
static void GetProfilesFromResourceBlock(Image *image,
const StringInfo *resource_block,ExceptionInfo *exception)
{
const unsigned char
*datum;
const unsigned char
*p;
size_t
length;
ssize_t
count;
StringInfo
*profile;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id;
datum=GetStringInfoDatum(resource_block);
length=GetStringInfoLength(resource_block);
for (p=datum; p < (datum+length-16); )
{
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
break;
switch (id)
{
case 0x03ed:
{
unsigned int
resolution;
unsigned short
units;
/*
Resolution.
*/
if (count < 10)
break;
p=ReadResourceLong(p,&resolution);
image->resolution.x=((double) resolution)/65536.0;
p=ReadResourceShort(p,&units)+2;
p=ReadResourceLong(p,&resolution)+4;
image->resolution.y=((double) resolution)/65536.0;
/*
Values are always stored as pixels per inch.
*/
if ((ResolutionType) units != PixelsPerCentimeterResolution)
image->units=PixelsPerInchResolution;
else
{
image->units=PixelsPerCentimeterResolution;
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case 0x0404:
{
/*
IPTC Profile
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x040c:
{
/*
Thumbnail.
*/
p+=count;
break;
}
case 0x040f:
{
/*
ICC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0422:
{
/*
EXIF Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0424:
{
/*
XMP Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
if ((count & 0x01) != 0)
p++;
}
}
static void PatchCorruptProfile(const char *name,StringInfo *profile)
{
unsigned char
*p;
size_t
length;
/*
Detect corrupt profiles and if discovered, repair.
*/
if (LocaleCompare(name,"xmp") == 0)
{
/*
Remove garbage after xpacket end.
*/
p=GetStringInfoDatum(profile);
p=(unsigned char *) strstr((const char *) p,"<?xpacket end=\"w\"?>");
if (p != (unsigned char *) NULL)
{
p+=19;
length=p-GetStringInfoDatum(profile);
if (length != GetStringInfoLength(profile))
{
*p='\0';
SetStringInfoLength(profile,length);
}
}
return;
}
if (LocaleCompare(name,"exif") == 0)
{
/*
Check if profile starts with byte order marker instead of Exif.
*/
p=GetStringInfoDatum(profile);
if ((LocaleNCompare((const char *) p,"MM",2) == 0) ||
(LocaleNCompare((const char *) p,"II",2) == 0))
{
const unsigned char
profile_start[] = "Exif\0\0";
StringInfo
*exif_profile;
exif_profile=AcquireStringInfo(6);
if (exif_profile != (StringInfo *) NULL)
{
SetStringInfoDatum(exif_profile,profile_start);
ConcatenateStringInfo(exif_profile,profile);
SetStringInfoLength(profile,GetStringInfoLength(exif_profile));
SetStringInfo(profile,exif_profile);
exif_profile=DestroyStringInfo(exif_profile);
}
}
}
}
#if defined(MAGICKCORE_XML_DELEGATE)
static MagickBooleanType ValidateXMPProfile(Image *image,
const StringInfo *profile,ExceptionInfo *exception)
{
xmlDocPtr
document;
/*
Parse XML profile.
*/
document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
XML_PARSE_NOWARNING);
if (document == (xmlDocPtr) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"CorruptImageProfile","`%s' (XMP)",image->filename);
return(MagickFalse);
}
xmlFreeDoc(document);
return(MagickTrue);
}
#else
static MagickBooleanType ValidateXMPProfile(Image *image,
const StringInfo *profile,ExceptionInfo *exception)
{
(void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
"DelegateLibrarySupportNotBuiltIn","'%s' (XML)",image->filename);
return(MagickFalse);
}
#endif
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
const StringInfo *profile,const MagickBooleanType recursive,
ExceptionInfo *exception)
{
char
key[MagickPathExtent];
MagickBooleanType
status;
StringInfo
*clone_profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
clone_profile=CloneStringInfo(profile);
PatchCorruptProfile(name,clone_profile);
if ((LocaleCompare(name,"xmp") == 0) &&
(ValidateXMPProfile(image,clone_profile,exception) == MagickFalse))
{
clone_profile=DestroyStringInfo(clone_profile);
return(MagickTrue);
}
if (image->profiles == (SplayTreeInfo *) NULL)
image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
DestroyProfile);
(void) CopyMagickString(key,name,MagickPathExtent);
LocaleLower(key);
status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString(key),clone_profile);
if (status != MagickFalse)
{
if (LocaleCompare(name,"8bim") == 0)
GetProfilesFromResourceBlock(image,clone_profile,exception);
else
if (recursive == MagickFalse)
WriteTo8BimProfile(image,name,clone_profile);
}
return(status);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
const StringInfo *profile,ExceptionInfo *exception)
{
return(SetImageProfileInternal(image,name,profile,MagickFalse,exception));
}
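/*
  Illustrative sketch (not part of MagickCore): attaching a profile without
  color conversion.  Unlike ProfileImage(), SetImageProfile() only stores
  the payload under the (lower-cased) name and mirrors it into any 8BIM
  wrapper.  The profile is cloned internally, so the caller still frees its
  own copy:

    StringInfo *profile = AcquireStringInfo(length);
    SetStringInfoDatum(profile, (unsigned char *) datum);
    (void) SetImageProfile(image, "xmp", profile, exception);
    profile = DestroyStringInfo(profile);
*/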
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the 8BIM resolution and the EXIF
% resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
int
c;
if (*length < 1)
return(EOF);
c=(int) (*(*p)++);
(*length)--;
return(c);
}
static inline signed short ReadProfileShort(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned short
value;
if (endian == LSBEndian)
{
value=(unsigned short) buffer[1] << 8;
value|=(unsigned short) buffer[0];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
value=(unsigned short) buffer[0] << 8;
value|=(unsigned short) buffer[1];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileLong(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned int
value;
if (endian == LSBEndian)
{
value=(unsigned int) buffer[3] << 24;
value|=(unsigned int) buffer[2] << 16;
value|=(unsigned int) buffer[1] << 8;
value|=(unsigned int) buffer[0];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
value=(unsigned int) buffer[0] << 24;
value|=(unsigned int) buffer[1] << 16;
value|=(unsigned int) buffer[2] << 8;
value|=(unsigned int) buffer[3];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
signed int
value;
if (*length < 4)
return(0);
value=ReadProfileLong(MSBEndian,*p);
(*length)-=4;
*p+=4;
return(value);
}
static inline signed short ReadProfileMSBShort(unsigned char **p,
size_t *length)
{
signed short
value;
if (*length < 2)
return(0);
value=ReadProfileShort(MSBEndian,*p);
(*length)-=2;
*p+=2;
return(value);
}
static inline void WriteProfileLong(const EndianType endian,
const size_t value,unsigned char *p)
{
unsigned char
buffer[4];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
buffer[2]=(unsigned char) (value >> 16);
buffer[3]=(unsigned char) (value >> 24);
(void) memcpy(p,buffer,4);
return;
}
buffer[0]=(unsigned char) (value >> 24);
buffer[1]=(unsigned char) (value >> 16);
buffer[2]=(unsigned char) (value >> 8);
buffer[3]=(unsigned char) value;
(void) memcpy(p,buffer,4);
}
static void WriteProfileShort(const EndianType endian,
const unsigned short value,unsigned char *p)
{
unsigned char
buffer[2];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
(void) memcpy(p,buffer,2);
return;
}
buffer[0]=(unsigned char) (value >> 8);
buffer[1]=(unsigned char) value;
(void) memcpy(p,buffer,2);
}
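/*
  Sync8BimProfile() scans the 8BIM resource block for the 16-byte resolution
  record (id 0x03ed) and rewrites it in place from the image: the horizontal
  and vertical resolutions as 16.16 fixed-point values (converted to pixels
  per inch when the image units are centimeters), each followed by a 2-byte
  units field.
*/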
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
size_t
length;
ssize_t
count;
unsigned char
*p;
unsigned short
id;
length=GetStringInfoLength(profile);
p=GetStringInfoDatum(profile);
while (length != 0)
{
if (ReadProfileByte(&p,&length) != 0x38)
continue;
if (ReadProfileByte(&p,&length) != 0x42)
continue;
if (ReadProfileByte(&p,&length) != 0x49)
continue;
if (ReadProfileByte(&p,&length) != 0x4D)
continue;
if (length < 7)
return(MagickFalse);
id=ReadProfileMSBShort(&p,&length);
count=(ssize_t) ReadProfileByte(&p,&length);
if ((count >= (ssize_t) length) || (count < 0))
return(MagickFalse);
p+=count;
length-=count;
if ((*p & 0x01) == 0)
(void) ReadProfileByte(&p,&length);
count=(ssize_t) ReadProfileMSBLong(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
if ((id == 0x3ED) && (count == 16))
{
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
65536.0),p);
else
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
65536.0),p);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
65536.0),p+8);
else
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
65536.0),p+8);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
}
p+=count;
length-=count;
}
return(MagickTrue);
}
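/*
  SyncExifProfile() locates the TIFF header inside the EXIF payload ("II" or
  "MM" byte-order mark followed by 0x002a), then walks the 12-byte IFD
  entries, rewriting XResolution (0x011a), YResolution (0x011b), Orientation
  (0x0112), and ResolutionUnit (0x0128) from the image properties.  Sub-IFDs
  reached through TAG_EXIF_OFFSET and TAG_INTEROP_OFFSET are followed with a
  bounded directory stack, and a splay tree of visited entries guards
  against offset cycles in corrupt profiles.
*/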
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
SplayTreeInfo
*exif_resources;
ssize_t
id,
level,
offset;
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || ((size_t) offset >= length))
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
int
components;
unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
format,
tag_value;
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
break;
components=(int) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
continue;
if (~length < number_bytes)
continue; /* prevent overflow */
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
(void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
if (number_bytes == 8)
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
(void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
if (number_bytes == 8)
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(MagickTrue);
}
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
MagickBooleanType
status;
StringInfo
*profile;
status=MagickTrue;
profile=(StringInfo *) GetImageProfile(image,"8BIM");
if (profile != (StringInfo *) NULL)
if (Sync8BimProfile(image,profile) == MagickFalse)
status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF");
if (profile != (StringInfo *) NULL)
if (SyncExifProfile(image,profile) == MagickFalse)
status=MagickFalse;
return(status);
}
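/*
  UpdateClipPath() rescales a Photoshop clipping path after the canvas
  geometry changes.  The blob is a sequence of 26-byte records: a 2-byte
  selector followed by 24 bytes of payload.  Selectors 0 and 3 open a
  subpath and carry the knot count; selectors 1, 2, 4, and 5 are knot
  records holding three (y,x) control points, each coordinate a signed
  fixed-point fraction of the canvas with 24 fractional bits (hence the
  4096*4096 scaling below).
*/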
static void UpdateClipPath(unsigned char *blob,size_t length,
const size_t old_columns,const size_t old_rows,
const RectangleInfo *new_geometry)
{
ssize_t
i;
ssize_t
knot_count,
selector;
knot_count=0;
while (length != 0)
{
selector=(ssize_t) ReadProfileMSBShort(&blob,&length);
switch (selector)
{
case 0:
case 3:
{
if (knot_count != 0)
{
blob+=24;
length-=MagickMin(24,(ssize_t) length);
break;
}
/*
Expected subpath length record.
*/
knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length);
blob+=22;
length-=MagickMin(22,(ssize_t) length);
break;
}
case 1:
case 2:
case 4:
case 5:
{
if (knot_count == 0)
{
/*
Unexpected subpath knot.
*/
blob+=24;
length-=MagickMin(24,(ssize_t) length);
break;
}
/*
Add sub-path knot
*/
for (i=0; i < 3; i++)
{
double
x,
y;
signed int
xx,
yy;
y=(double) ReadProfileMSBLong(&blob,&length);
y=y*old_rows/4096/4096;
y-=new_geometry->y;
yy=(signed int) ((y*4096*4096)/new_geometry->height);
WriteProfileLong(MSBEndian,(size_t) yy,blob-4);
x=(double) ReadProfileMSBLong(&blob,&length);
x=x*old_columns/4096/4096;
x-=new_geometry->x;
xx=(signed int) ((x*4096*4096)/new_geometry->width);
WriteProfileLong(MSBEndian,(size_t) xx,blob-4);
}
knot_count--;
break;
}
case 6:
case 7:
case 8:
default:
{
blob+=24;
length-=MagickMin(24,(ssize_t) length);
break;
}
}
}
}
MagickPrivate void Update8BIMClipPath(const Image *image,
const size_t old_columns,const size_t old_rows,
const RectangleInfo *new_geometry)
{
const StringInfo
*profile;
size_t
length;
ssize_t
count,
id;
unsigned char
*info;
assert(image != (Image *) NULL);
assert(new_geometry != (RectangleInfo *) NULL);
profile=GetImageProfile(image,"8bim");
if (profile == (StringInfo *) NULL)
return;
length=GetStringInfoLength(profile);
info=GetStringInfoDatum(profile);
while (length > 0)
{
if (ReadProfileByte(&info,&length) != (unsigned char) '8')
continue;
if (ReadProfileByte(&info,&length) != (unsigned char) 'B')
continue;
if (ReadProfileByte(&info,&length) != (unsigned char) 'I')
continue;
if (ReadProfileByte(&info,&length) != (unsigned char) 'M')
continue;
id=(ssize_t) ReadProfileMSBShort(&info,&length);
count=(ssize_t) ReadProfileByte(&info,&length);
if ((count != 0) && ((size_t) count <= length))
{
info+=count;
length-=count;
}
if ((count & 0x01) == 0)
(void) ReadProfileByte(&info,&length);
count=(ssize_t) ReadProfileMSBLong(&info,&length);
if ((count < 0) || ((size_t) count > length))
{
length=0;
continue;
}
if ((id > 1999) && (id < 2999))
UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry);
info+=count;
length-=MagickMin(count,(ssize_t) length);
}
}
|
volumeramdistancetransform.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2016-2021 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#pragma once
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/util/indexmapper.h>
#include <inviwo/core/datastructures/volume/volume.h>
#include <inviwo/core/datastructures/volume/volumeramprecision.h>
#ifdef IVW_USE_OPENMP
#include <omp.h>
#endif
namespace inviwo {
namespace util {
/**
* Implementation of Euclidean Distance Transform according to Saito's algorithm:
* T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
* of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11).
* pp. 1551-1565, 1994.
* http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
*
* Calculates the distance in grid index space
* * Predicate is a function of type (const T &value) -> bool to decide if a value in the input
* is a "feature".
* * ValueTransform is a function of type (const U& squaredDist) -> U that is applied to all
* squared distance values at the end of the calculation.
* * ProgressCallback is a function of type (double progress) -> void that is called with a value
* from 0 to 1 to indicate the progress of the calculation.
*/
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
VolumeRAMPrecision<U>* outDistanceField, const Matrix<3, U> basis,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename T, typename U>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
VolumeRAMPrecision<U>* outDistanceField, const Matrix<3, U> basis,
const size3_t upsample);
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename U, typename ProgressCallback>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale, ProgressCallback callback);
template <typename U>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale);
} // namespace util
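/**
 * Usage sketch (illustrative only; the dimensions, value type, and lambdas
 * are assumptions, not fixed by this header). Computes a Euclidean distance
 * field from all voxels whose value exceeds 0.5, with an identity basis and
 * no upsampling:
 * \code
 * auto in = std::make_shared<VolumeRAMPrecision<float>>(size3_t(64));
 * auto out = std::make_shared<VolumeRAMPrecision<float>>(size3_t(64));
 * util::volumeRAMDistanceTransform(
 *     in.get(), out.get(), Matrix<3, float>(1.0f), size3_t(1),
 *     [](const float& v) { return v > 0.5f; },        // predicate
 *     [](const float& d2) { return std::sqrt(d2); },  // value transform
 *     [](double) {});                                 // progress callback
 * \endcode
 */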
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
VolumeRAMPrecision<U>* outDistanceField,
const Matrix<3, U> basis, const size3_t upsample,
Predicate predicate, ValueTransform valueTransform,
ProgressCallback callback) {
#ifdef IVW_USE_OPENMP
omp_set_num_threads(std::thread::hardware_concurrency());
#endif
using int64 = glm::int64;
auto square = [](auto a) { return a * a; };
callback(0.0);
const T* src = inVolume->getDataTyped();
U* dst = outDistanceField->getDataTyped();
const i64vec3 srcDim{inVolume->getDimensions()};
const i64vec3 dstDim{outDistanceField->getDimensions()};
const i64vec3 sm{upsample};
const auto squareBasis = glm::transpose(basis) * basis;
const Vector<3, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1], squareBasis[2][2]};
const Vector<3, U> squareVoxelSize{squareBasisDiag / Vector<3, U>{dstDim * dstDim}};
const Vector<3, U> invSquareVoxelSize{Vector<3, U>{1.0f} / squareVoxelSize};
{
const auto maxdist = glm::compMax(squareBasisDiag);
bool orthogonal = true;
for (size_t i = 0; i < squareBasis.length(); i++) {
for (size_t j = 0; j < squareBasis.length(); j++) {
if (i != j) {
if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
orthogonal = false;
break;
}
}
}
}
if (!orthogonal) {
LogWarnCustom(
"volumeRAMDistanceTransform",
"Calculating the distance transform on a non-orthogonal volume will not give "
"correct values");
}
}
if (srcDim * sm != dstDim) {
throw Exception(
"DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
" dst = " + toString(dstDim) + " scaling = " + toString(sm),
IVW_CONTEXT_CUSTOM("volumeRAMDistanceTransform"));
}
util::IndexMapper<3, int64> srcInd(srcDim);
util::IndexMapper<3, int64> dstInd(dstDim);
auto is_feature = [&](const int64 x, const int64 y, const int64 z) {
return predicate(src[srcInd(x / sm.x, y / sm.y, z / sm.z)]);
};
// first pass, forward and backward scan along x
// result: min distance in x direction
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
for (int64 z = 0; z < dstDim.z; ++z) {
for (int64 y = 0; y < dstDim.y; ++y) {
// forward
U dist = static_cast<U>(dstDim.x);
for (int64 x = 0; x < dstDim.x; ++x) {
if (!is_feature(x, y, z)) {
++dist;
} else {
dist = U(0);
}
dst[dstInd(x, y, z)] = squareVoxelSize.x * square(dist);
}
// backward
dist = static_cast<U>(dstDim.x);
for (int64 x = dstDim.x - 1; x >= 0; --x) {
if (!is_feature(x, y, z)) {
++dist;
} else {
dist = U(0);
}
dst[dstInd(x, y, z)] =
std::min<U>(dst[dstInd(x, y, z)], squareVoxelSize.x * square(dist));
}
}
}
// second pass, scan y direction
// for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
// result: min distance in x and y direction
callback(0.3);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
{
std::vector<U> buff;
buff.resize(dstDim.y);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
for (int64 z = 0; z < dstDim.z; ++z) {
for (int64 x = 0; x < dstDim.x; ++x) {
// cache column data into temporary buffer
for (int64 y = 0; y < dstDim.y; ++y) {
buff[y] = dst[dstInd(x, y, z)];
}
for (int64 y = 0; y < dstDim.y; ++y) {
auto d = buff[y];
if (d != U(0)) {
const auto rMax =
static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
const auto rStart = std::min(rMax, y - 1);
const auto rEnd = std::min(rMax, dstDim.y - y);
for (int64 n = -rStart; n < rEnd; ++n) {
const auto w = buff[y + n] + squareVoxelSize.y * square(n);
if (w < d) d = w;
}
}
dst[dstInd(x, y, z)] = d;
}
}
}
}
// third pass, scan z direction
// for each voxel v(x,y,z) find min_i(data(x,y,i) + (z - i)^2), 0 <= i < dimZ
// result: min distance in x, y, and z directions
callback(0.6);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
{
std::vector<U> buff;
buff.resize(dstDim.z);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
for (int64 y = 0; y < dstDim.y; ++y) {
for (int64 x = 0; x < dstDim.x; ++x) {
// cache column data into temporary buffer
for (int64 z = 0; z < dstDim.z; ++z) {
buff[z] = dst[dstInd(x, y, z)];
}
for (int64 z = 0; z < dstDim.z; ++z) {
auto d = buff[z];
if (d != U(0)) {
const auto rMax =
static_cast<int64>(std::sqrt(d * invSquareVoxelSize.z)) + 1;
const auto rStart = std::min(rMax, z - 1);
const auto rEnd = std::min(rMax, dstDim.z - z);
for (int64 n = -rStart; n < rEnd; ++n) {
const auto w = buff[z + n] + squareVoxelSize.z * square(n);
if (w < d) d = w;
}
}
dst[dstInd(x, y, z)] = d;
}
}
}
}
// scale data
callback(0.9);
const int64 volSize = dstDim.x * dstDim.y * dstDim.z;
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
for (int64 i = 0; i < volSize; ++i) {
dst[i] = valueTransform(dst[i]);
}
callback(1.0);
}
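// Illustrative sketch (not part of Inviwo): the 1D building block behind the
// three passes above. A forward and a backward run-length scan yield, for each
// element, the squared distance to the nearest feature along one axis; the y
// and z passes then minimize dist[i + n] + n^2 (weighted by the squared voxel
// size) over the offset n. The names oneDimSquaredDistance and feature are
// hypothetical.
#if 0
static void oneDimSquaredDistance(const int* feature, float* dist, int n) {
    int run = n; /* elements since the last feature; n acts as "infinity" */
    for (int i = 0; i < n; ++i) { /* forward scan */
        run = feature[i] ? 0 : run + 1;
        dist[i] = (float)(run * run);
    }
    run = n;
    for (int i = n - 1; i >= 0; --i) { /* backward scan */
        run = feature[i] ? 0 : run + 1;
        const float d = (float)(run * run);
        if (d < dist[i]) dist[i] = d;
    }
}
#endif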
template <typename T, typename U>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
VolumeRAMPrecision<U>* outDistanceField,
const Matrix<3, U> basis, const size3_t upsample) {
util::volumeRAMDistanceTransform(
inVolume, outDistanceField, basis, upsample,
[](const T& val) { return util::glm_convert_normalized<double>(val) > 0.5; },
[](const U& squareDist) {
return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
},
[](double f) {});
}
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback) {
const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>();
inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) {
volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample,
predicate, valueTransform, callback);
});
}
template <typename U, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, double threshold, bool normalize,
bool flip, bool square, double scale,
ProgressCallback progress) {
const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>();
inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) {
using ValueType = util::PrecisionValueType<decltype(vrprecision)>;
const auto predicateIn = [threshold](const ValueType& val) { return val < threshold; };
const auto predicateOut = [threshold](const ValueType& val) { return val > threshold; };
const auto normPredicateIn = [threshold](const ValueType& val) {
return util::glm_convert_normalized<double>(val) < threshold;
};
const auto normPredicateOut = [threshold](const ValueType& val) {
return util::glm_convert_normalized<double>(val) > threshold;
};
const auto valTransIdent = [scale](const float& squareDist) {
return static_cast<float>(scale * squareDist);
};
const auto valTransSqrt = [scale](const float& squareDist) {
return static_cast<float>(scale * std::sqrt(squareDist));
};
if (normalize && square && flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, normPredicateIn, valTransIdent, progress);
} else if (normalize && square && !flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, normPredicateOut, valTransIdent, progress);
} else if (normalize && !square && flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, normPredicateIn, valTransSqrt, progress);
} else if (normalize && !square && !flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, normPredicateOut, valTransSqrt, progress);
} else if (!normalize && square && flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, predicateIn, valTransIdent, progress);
} else if (!normalize && square && !flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, predicateOut, valTransIdent, progress);
} else if (!normalize && !square && flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, predicateIn, valTransSqrt, progress);
} else if (!normalize && !square && !flip) {
util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
upsample, predicateOut, valTransSqrt, progress);
}
});
}
template <typename U>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, double threshold, bool normalize,
bool flip, bool square, double scale) {
util::volumeDistanceTransform(inVolume, outDistanceField, upsample, threshold, normalize, flip,
square, scale, [](double) {});
}
} // namespace inviwo
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
struct _ThresholdMap
{
char
*map_id,
*description;
size_t
width,
height;
ssize_t
divisor,
*levels;
};
/*
Static declarations.
*/
#if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
#include "MagickCore/threshold-map.h"
#else
static const char *const
BuiltinMap=
"<?xml version=\"1.0\"?>"
"<thresholds>"
" <threshold map=\"threshold\" alias=\"1x1\">"
" <description>Threshold 1x1 (non-dither)</description>"
" <levels width=\"1\" height=\"1\" divisor=\"2\">"
" 1"
" </levels>"
" </threshold>"
" <threshold map=\"checks\" alias=\"2x1\">"
" <description>Checkerboard 2x1 (dither)</description>"
" <levels width=\"2\" height=\"2\" divisor=\"3\">"
" 1 2"
" 2 1"
" </levels>"
" </threshold>"
"</thresholds>";
#endif
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const size_t width,const size_t height,const double bias,
ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"
CacheView
*image_view,
*threshold_view;
Image
*threshold_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickSizeType
number_pixels;
ssize_t
y;
/*
Initialize threshold image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
threshold_image=CloneImage(image,0,0,MagickTrue,exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
if ((width == 0) || (height == 0))
return(threshold_image);
status=SetImageStorageClass(threshold_image,DirectClass,exception);
if (status == MagickFalse)
{
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Threshold image.
*/
status=MagickTrue;
progress=0;
number_pixels=(MagickSizeType) width*height;
image_view=AcquireVirtualCacheView(image,exception);
threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,threshold_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
channel_bias[MaxPixelChannels],
channel_sum[MaxPixelChannels];
register const Quantum
*magick_restrict p,
*magick_restrict pixels;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
center,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(height/2L),image->columns+width,height,exception);
q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
GetPixelChannels(image)*(width/2);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
pixels=p;
channel_bias[channel]=0.0;
channel_sum[channel]=0.0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
if (u == (ssize_t) (width-1))
channel_bias[channel]+=pixels[i];
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
mean;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
channel_sum[channel]-=channel_bias[channel];
channel_bias[channel]=0.0;
pixels=p;
for (v=0; v < (ssize_t) height; v++)
{
channel_bias[channel]+=pixels[i];
pixels+=(width-1)*GetPixelChannels(image);
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image)*(image->columns+1);
}
mean=(double) (channel_sum[channel]/number_pixels+bias);
SetPixelChannel(threshold_image,channel,(Quantum) ((double)
p[center+i] <= mean ? 0 : QuantumRange),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(threshold_image);
}
if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_image->type=image->type;
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
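/*
  Illustrative usage sketch (not part of MagickCore; the name
  ExampleAdaptiveThreshold is hypothetical).  A 15x15 neighborhood adapts the
  threshold to local contrast; a small negative bias lowers the local mean so
  flat regions come out white rather than black.
*/
#if 0
static Image *ExampleAdaptiveThreshold(const Image *image,
  ExceptionInfo *exception)
{
  return(AdaptiveThresholdImage(image,15,15,-0.05*QuantumRange,exception));
}
#endif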
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically performs image thresholding
%  depending on the method you specify.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double KapurThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
#define MaxIntensity 255
double
*black_entropy,
*cumulative_histogram,
entropy,
epsilon,
maximum_entropy,
*white_entropy;
register ssize_t
i,
j;
size_t
threshold;
/*
    Compute optimal threshold from the entropy of the histogram.
*/
cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*cumulative_histogram));
black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*black_entropy));
white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*white_entropy));
if ((cumulative_histogram == (double *) NULL) ||
(black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
{
if (white_entropy != (double *) NULL)
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
if (black_entropy != (double *) NULL)
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
if (cumulative_histogram != (double *) NULL)
cumulative_histogram=(double *)
RelinquishMagickMemory(cumulative_histogram);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Entropy for black and white parts of the histogram.
*/
cumulative_histogram[0]=histogram[0];
for (i=1; i <= MaxIntensity; i++)
cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
epsilon=MagickMinimumValue;
for (j=0; j <= MaxIntensity; j++)
{
/*
Black entropy.
*/
black_entropy[j]=0.0;
if (cumulative_histogram[j] > epsilon)
{
entropy=0.0;
for (i=0; i <= j; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/cumulative_histogram[j]*
log(histogram[i]/cumulative_histogram[j]);
black_entropy[j]=entropy;
}
/*
White entropy.
*/
white_entropy[j]=0.0;
if ((1.0-cumulative_histogram[j]) > epsilon)
{
entropy=0.0;
for (i=j+1; i <= MaxIntensity; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
log(histogram[i]/(1.0-cumulative_histogram[j]));
white_entropy[j]=entropy;
}
}
/*
Find histogram bin with maximum entropy.
*/
maximum_entropy=black_entropy[0]+white_entropy[0];
threshold=0;
for (j=1; j <= MaxIntensity; j++)
if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
{
maximum_entropy=black_entropy[j]+white_entropy[j];
threshold=(size_t) j;
}
/*
Free resources.
*/
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram);
return(100.0*threshold/MaxIntensity);
}
static double OTSUThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
double
max_sigma,
*myu,
*omega,
*probability,
*sigma,
threshold;
register ssize_t
i;
/*
Compute optimal threshold from maximization of inter-class variance.
*/
myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*probability));
sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
(probability == (double *) NULL) || (sigma == (double *) NULL))
{
if (sigma != (double *) NULL)
sigma=(double *) RelinquishMagickMemory(sigma);
if (probability != (double *) NULL)
probability=(double *) RelinquishMagickMemory(probability);
if (omega != (double *) NULL)
omega=(double *) RelinquishMagickMemory(omega);
if (myu != (double *) NULL)
myu=(double *) RelinquishMagickMemory(myu);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Calculate probability density.
*/
for (i=0; i <= (ssize_t) MaxIntensity; i++)
probability[i]=histogram[i];
/*
Generate probability of graylevels and mean value for separation.
*/
omega[0]=probability[0];
myu[0]=0.0;
for (i=1; i <= (ssize_t) MaxIntensity; i++)
{
omega[i]=omega[i-1]+probability[i];
myu[i]=myu[i-1]+i*probability[i];
}
/*
Sigma maximization: inter-class variance and compute optimal threshold.
*/
threshold=0;
max_sigma=0.0;
for (i=0; i < (ssize_t) MaxIntensity; i++)
{
sigma[i]=0.0;
if ((omega[i] != 0.0) && (omega[i] != 1.0))
sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
omega[i]));
if (sigma[i] > max_sigma)
{
max_sigma=sigma[i];
threshold=(double) i;
}
}
/*
Free resources.
*/
myu=(double *) RelinquishMagickMemory(myu);
omega=(double *) RelinquishMagickMemory(omega);
probability=(double *) RelinquishMagickMemory(probability);
sigma=(double *) RelinquishMagickMemory(sigma);
return(100.0*threshold/MaxIntensity);
}
static double TriangleThreshold(const double *histogram)
{
double
a,
b,
c,
count,
distance,
inverse_ratio,
max_distance,
segment,
x1,
x2,
y1,
y2;
register ssize_t
i;
ssize_t
end,
max,
start,
threshold;
/*
Compute optimal threshold with triangle algorithm.
*/
start=0; /* find start bin, first bin not zero count */
for (i=0; i <= (ssize_t) MaxIntensity; i++)
if (histogram[i] > 0.0)
{
start=i;
break;
}
end=0; /* find end bin, last bin not zero count */
for (i=(ssize_t) MaxIntensity; i >= 0; i--)
if (histogram[i] > 0.0)
{
end=i;
break;
}
max=0; /* find max bin, bin with largest count */
count=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
if (histogram[i] > count)
{
max=i;
count=histogram[i];
}
/*
Compute threshold at split point.
*/
x1=(double) max;
y1=histogram[max];
x2=(double) end;
if ((max-start) >= (end-max))
x2=(double) start;
y2=0.0;
a=y1-y2;
b=x2-x1;
c=(-1.0)*(a*x1+b*y1);
inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
threshold=0;
max_distance=0.0;
if (x2 == (double) start)
for (i=start; i < max; i++)
{
segment=inverse_ratio*(a*i+b*histogram[i]+c);
distance=sqrt(segment*segment);
if ((distance > max_distance) && (segment > 0.0))
{
threshold=i;
max_distance=distance;
}
}
else
for (i=end; i > max; i--)
{
segment=inverse_ratio*(a*i+b*histogram[i]+c);
distance=sqrt(segment*segment);
if ((distance > max_distance) && (segment < 0.0))
{
threshold=i;
max_distance=distance;
}
}
return(100.0*threshold/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
const AutoThresholdMethod method,ExceptionInfo *exception)
{
CacheView
*image_view;
char
property[MagickPathExtent];
double
gamma,
*histogram,
sum,
threshold;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
y;
/*
Form histogram.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*histogram));
if (histogram == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=MagickTrue;
(void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
double intensity = GetPixelIntensity(image,p);
histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Normalize histogram.
*/
sum=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
sum+=histogram[i];
gamma=PerceptibleReciprocal(sum);
for (i=0; i <= (ssize_t) MaxIntensity; i++)
histogram[i]=gamma*histogram[i];
/*
Discover threshold from histogram.
*/
switch (method)
{
case KapurThresholdMethod:
{
threshold=KapurThreshold(image,histogram,exception);
break;
}
case OTSUThresholdMethod:
default:
{
threshold=OTSUThreshold(image,histogram,exception);
break;
}
case TriangleThresholdMethod:
{
threshold=TriangleThreshold(histogram);
break;
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
if (threshold < 0.0)
status=MagickFalse;
if (status == MagickFalse)
return(MagickFalse);
/*
Threshold image.
*/
(void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
(void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}
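/*
  Illustrative usage sketch (not part of MagickCore; ExampleAutoThreshold is a
  hypothetical name).  Otsu's method picks the global threshold that maximizes
  the between-class variance of the normalized histogram computed above.
*/
#if 0
static MagickBooleanType ExampleAutoThreshold(Image *image,
  ExceptionInfo *exception)
{
  return(AutoThresholdImage(image,OTSUThresholdMethod,exception));
}
#endif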
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
%  More precisely, each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than the given value is set to its maximum, QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as this operator by using LevelImage()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
/*
Bilevel threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
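/*
  Illustrative usage sketch (not part of MagickCore; ExampleBilevel is a
  hypothetical name).  Equivalent to the command-line "-threshold 50%":
  channel values at or below half of QuantumRange go to black, the rest to
  white.
*/
#if 0
static MagickBooleanType ExampleBilevel(Image *image,ExceptionInfo *exception)
{
  return(BilevelImage(image,0.5*QuantumRange,exception));
}
#endif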
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
GetPixelInfo(image,&threshold);
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
    Black threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel < GetPixelInfoChannel(&threshold,channel))
q[i]=(Quantum) 0;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
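/*
  Illustrative usage sketch (not part of MagickCore; ExampleBlackThreshold is
  a hypothetical name).  Channel values below 20% of QuantumRange are forced
  to black; the geometry string also accepts per-channel values such as
  "20%,30%,40%".
*/
#if 0
static MagickBooleanType ExampleBlackThreshold(Image *image,
  ExceptionInfo *exception)
{
  return(BlackThresholdImage(image,"20%",exception));
}
#endif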
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() sets each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535); otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) ClampPixel(q->red);
q->green=(double) ClampPixel(q->green);
q->blue=(double) ClampPixel(q->blue);
q->alpha=(double) ClampPixel(q->alpha);
q++;
}
return(SyncImage(image,exception));
}
/*
Clamp image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampPixel((MagickRealType) q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
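/*
  Illustrative usage sketch (not part of MagickCore; ExampleClamp is a
  hypothetical name and assumes a caller that also uses EvaluateImage() from
  MagickCore/statistic.h).  In HDRI builds an arithmetic operation can push
  channel values past QuantumRange; ClampImage() pulls them back into range.
*/
#if 0
static MagickBooleanType ExampleClamp(Image *image,ExceptionInfo *exception)
{
  (void) EvaluateImage(image,MultiplyEvaluateOperator,1.5,exception);
  return(ClampImage(image,exception));
}
#endif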
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorThresholdImage() forces all pixels in the color range to white
% otherwise black.
%
% The format of the ColorThresholdImage method is:
%
% MagickBooleanType ColorThresholdImage(Image *image,
% const PixelInfo *start_color,const PixelInfo *stop_color,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o start_color, stop_color: define the start and stop color range. Any
% pixel within the range returns white otherwise black.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorThresholdImage(Image *image,
const PixelInfo *start_color,const PixelInfo *stop_color,
ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
start,
stop;
ssize_t
y;
/*
Color threshold image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=AcquireImageColormap(image,2,exception);
if (status == MagickFalse)
return(status);
start=(*start_color);
stop=(*stop_color);
switch (image->colorspace)
{
case HCLColorspace:
{
ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
case LabColorspace:
{
ConvertRGBToLab(start_color->red,start_color->green,start_color->blue,
&start.red,&start.green,&start.blue);
ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue,
&stop.red,&stop.green,&stop.blue);
break;
}
default:
{
start.red*=QuantumScale;
start.green*=QuantumScale;
start.blue*=QuantumScale;
stop.red*=QuantumScale;
stop.green*=QuantumScale;
stop.blue*=QuantumScale;
break;
}
}
start.red*=QuantumRange;
start.green*=QuantumRange;
start.blue*=QuantumRange;
stop.red*=QuantumRange;
stop.green*=QuantumRange;
stop.blue*=QuantumRange;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickBooleanType
foreground = MagickTrue;
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((q[i] < GetPixelInfoChannel(&start,channel)) ||
(q[i] > GetPixelInfoChannel(&stop,channel)))
foreground=MagickFalse;
}
SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 1 : 0),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
image->colorspace=sRGBColorspace;
return(SyncImage(image,exception));
}
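/*
  Illustrative usage sketch (not part of MagickCore; ExampleColorThreshold is
  a hypothetical name).  Pixels whose channels all fall inside the start/stop
  range become white, everything else black.
*/
#if 0
static MagickBooleanType ExampleColorThreshold(Image *image,
  ExceptionInfo *exception)
{
  PixelInfo
    start,
    stop;

  (void) QueryColorCompliance("rgb(64,64,64)",AllCompliance,&start,exception);
  (void) QueryColorCompliance("rgb(192,192,192)",AllCompliance,&stop,
    exception);
  return(ColorThresholdImage(image,&start,&stop,exception));
}
#endif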
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocates the given ThresholdMap.
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
assert(map != (ThresholdMap *) NULL);
if (map->map_id != (char *) NULL)
map->map_id=DestroyString(map->map_id);
if (map->description != (char *) NULL)
map->description=DestroyString(map->description);
if (map->levels != (ssize_t *) NULL)
map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
map=(ThresholdMap *) RelinquishMagickMemory(map);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
ExceptionInfo *exception)
{
ThresholdMap
*map;
map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception);
if (map != (ThresholdMap *) NULL)
return(map);
#if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
{
const StringInfo
*option;
LinkedListInfo
*options;
options=GetConfigureOptions(ThresholdsFilename,exception);
option=(const StringInfo *) GetNextValueInLinkedList(options);
while (option != (const StringInfo *) NULL)
{
map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
GetStringInfoPath(option),map_id,exception);
if (map != (ThresholdMap *) NULL)
break;
option=(const StringInfo *) GetNextValueInLinkedList(options);
}
options=DestroyConfigureOptions(options);
}
#endif
return(map);
}
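/*
  Illustrative usage sketch (not part of MagickCore; ExampleGetThresholdMap is
  a hypothetical name).  "checks" is one of the built-in maps declared above;
  the caller owns the returned map and must destroy it.
*/
#if 0
static void ExampleGetThresholdMap(ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  map=GetThresholdMap("checks",exception);
  if (map != (ThresholdMap *) NULL)
    map=DestroyThresholdMap(map);
}
#endif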
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() looks for a given threshold map name or alias in the
%  given XML file data, and returns the allocated map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
const char *map_id,ExceptionInfo *exception)
{
char
*p;
const char
*attribute,
*content;
double
value;
register ssize_t
i;
ThresholdMap
*map;
XMLTreeInfo
*description,
*levels,
*threshold,
*thresholds;
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
map=(ThresholdMap *) NULL;
thresholds=NewXMLTree(xml,exception);
if (thresholds == (XMLTreeInfo *) NULL)
return(map);
for (threshold=GetXMLTreeChild(thresholds,"threshold");
threshold != (XMLTreeInfo *) NULL;
threshold=GetNextXMLTreeTag(threshold))
{
attribute=GetXMLTreeAttribute(threshold,"map");
if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
break;
attribute=GetXMLTreeAttribute(threshold,"alias");
if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
break;
}
if (threshold == (XMLTreeInfo *) NULL)
{
thresholds=DestroyXMLTree(thresholds);
return(map);
}
description=GetXMLTreeChild(threshold,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
return(map);
}
levels=GetXMLTreeChild(threshold,"levels");
if (levels == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<levels>, map \"%s\"", map_id);
thresholds=DestroyXMLTree(thresholds);
return(map);
}
map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
map->map_id=(char *) NULL;
map->description=(char *) NULL;
map->levels=(ssize_t *) NULL;
attribute=GetXMLTreeAttribute(threshold,"map");
if (attribute != (char *) NULL)
map->map_id=ConstantString(attribute);
content=GetXMLTreeContent(description);
if (content != (char *) NULL)
map->description=ConstantString(content);
attribute=GetXMLTreeAttribute(levels,"width");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->width=StringToUnsignedLong(attribute);
if (map->width == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
attribute=GetXMLTreeAttribute(levels,"height");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->height=StringToUnsignedLong(attribute);
if (map->height == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
attribute=GetXMLTreeAttribute(levels,"divisor");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->divisor=(ssize_t) StringToLong(attribute);
if (map->divisor < 2)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
content=GetXMLTreeContent(levels);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<levels>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
sizeof(*map->levels));
if (map->levels == (ssize_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
for (i=0; i < (ssize_t) (map->width*map->height); i++)
{
map->levels[i]=(ssize_t) strtol(content,&p,10);
if (p == content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
(double) map->levels[i],map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
content=p;
}
value=(double) strtol(content,&p,10);
(void) value;
if (p != content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
thresholds=DestroyXMLTree(thresholds);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o file:  A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
const char *filename,ExceptionInfo *exception)
{
const char
*alias,
*content,
*map;
XMLTreeInfo
*description,
*threshold,
*thresholds;
assert( xml != (char *) NULL );
assert( file != (FILE *) NULL );
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
thresholds=NewXMLTree(xml,exception);
if ( thresholds == (XMLTreeInfo *) NULL )
return(MagickFalse);
(void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
(void) FormatLocaleFile(file,
"----------------------------------------------------\n");
threshold=GetXMLTreeChild(thresholds,"threshold");
for ( ; threshold != (XMLTreeInfo *) NULL;
threshold=GetNextXMLTreeTag(threshold))
{
map=GetXMLTreeAttribute(threshold,"map");
if (map == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<map>");
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
alias=GetXMLTreeAttribute(threshold,"alias");
description=GetXMLTreeChild(threshold,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"",map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
content=GetXMLTreeContent(description);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<description>, map \"%s\"", map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
(void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
content);
}
thresholds=DestroyXMLTree(thresholds);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o file:  A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
ExceptionInfo *exception)
{
const StringInfo
*option;
LinkedListInfo
*options;
MagickStatusType
status;
status=MagickTrue;
if (file == (FILE *) NULL)
file=stdout;
options=GetConfigureOptions(ThresholdsFilename,exception);
(void) FormatLocaleFile(file,
"\n Threshold Maps for Ordered Dither Operations\n");
option=(const StringInfo *) GetNextValueInLinkedList(options);
while (option != (const StringInfo *) NULL)
{
(void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
GetStringInfoPath(option),exception);
option=(const StringInfo *) GetNextValueInLinkedList(options);
}
options=DestroyConfigureOptions(options);
return(status != 0 ? MagickTrue : MagickFalse);
}
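/*
  Illustrative usage sketch (not part of MagickCore; ExampleListThresholdMaps
  is a hypothetical name).  Prints the name, alias, and description of every
  configured threshold map to stdout.
*/
#if 0
static void ExampleListThresholdMaps(ExceptionInfo *exception)
{
  (void) ListThresholdMaps(stdout,exception);
}
#endif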
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() performs an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
%  For example: "o3x3,6" will generate a 6 level posterization of the
%  image with an ordered 3x3 diffused pixel dither being applied between
%  each level, while "checks,8,8,4" will produce a 332 colormapped image
%  with only a single checkerboard hash pattern (50% grey) between each
%  color level, to basically double the number of color levels with
%  a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
char
token[MagickPathExtent];
const char
*p;
double
levels[CompositePixelChannel];
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
ThresholdMap
*map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (threshold_map == (const char *) NULL)
return(MagickTrue);
p=(char *) threshold_map;
while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
(*p != '\0'))
p++;
threshold_map=p;
while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
(*p != '\0'))
{
if ((p-threshold_map) >= (MagickPathExtent-1))
break;
token[p-threshold_map]=(*p);
p++;
}
token[p-threshold_map]='\0';
map=GetThresholdMap(token,exception);
if (map == (ThresholdMap *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
return(MagickFalse);
}
for (i=0; i < MaxPixelChannels; i++)
levels[i]=2.0;
p=strchr((char *) threshold_map,',');
if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
for (i=0; (i < MaxPixelChannels); i++)
levels[i]=StringToDouble(token,(char **) NULL);
for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
levels[i]=StringToDouble(token,(char **) NULL);
}
}
for (i=0; i < MaxPixelChannels; i++)
if (fabs(levels[i]) >= 1)
levels[i]-=1.0;
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
ssize_t
n;
n=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ssize_t
level,
threshold;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (fabs(levels[n]) < MagickEpsilon)
{
n++;
continue;
}
threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
level=threshold/(map->divisor-1);
threshold-=level*(map->divisor-1);
q[i]=ClampToQuantum((double) (level+(threshold >=
map->levels[(x % map->width)+map->width*(y % map->height)]))*
QuantumRange/levels[n]);
n++;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
map=DestroyThresholdMap(map);
  return(status);
}
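/*
  Illustrative usage sketch (not part of MagickCore; ExampleOrderedDither is a
  hypothetical name).  Dithers with the built-in "checks" map over 4 intensity
  levels per channel; larger maps such as "o8x8" come from thresholds.xml.
*/
#if 0
static MagickBooleanType ExampleOrderedDither(Image *image,
  ExceptionInfo *exception)
{
  return(OrderedDitherImage(image,"checks,4",exception));
}
#endif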
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
const double epsilon)
{
double
sign;
sign=(double) quantum < 0.0 ? -1.0 : 1.0;
if ((sign*quantum) >= epsilon)
return(quantum);
return((Quantum) (sign*epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
epsilon);
q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
epsilon);
q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
epsilon);
q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
epsilon);
q++;
}
return(SyncImage(image,exception));
}
/*
Perceptible image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PerceptibleThreshold(q[i],epsilon);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PerceptibleImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two-color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
% const double min_threshold,const double max_threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o min_threshold,max_threshold: Specify the low and high thresholds. These
% values range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
const double min_threshold,const double max_threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
GetPixelInfo(image,&threshold);
/*
Random threshold image.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
threshold;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((double) q[i] < min_threshold)
threshold=min_threshold;
else
if ((double) q[i] > max_threshold)
threshold=max_threshold;
else
threshold=(double) (QuantumRange*
GetPseudoRandomValue(random_info[id]));
q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
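/*
The per-channel rule above as a standalone sketch (assumes a pixel value in
[0,QuantumRange] and a uniform random draw u in [0,1)): values below
min_threshold or above max_threshold are compared against that fixed bound,
values in between against a random threshold, and the result snaps to 0 or
QuantumRange.
*/
#if 0
static double random_threshold_rule(const double pixel,
  const double min_threshold,const double max_threshold,const double u)
{
  double
    threshold;

  if (pixel < min_threshold)
    threshold=min_threshold;
  else
    if (pixel > max_threshold)
      threshold=max_threshold;
    else
      threshold=QuantumRange*u;
  return(pixel <= threshold ? 0.0 : (double) QuantumRange);
}
#endif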
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n g e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RangeThresholdImage() applies soft and hard thresholding: pixels below
% low_black or above high_black become black, pixels from low_white through
% high_white become white, and pixels in the two transition bands are scaled
% linearly between black and white.
%
% The format of the RangeThresholdImage method is:
%
% MagickBooleanType RangeThresholdImage(Image *image,
% const double low_black,const double low_white,const double high_white,
% const double high_black,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low_black: Define the minimum black threshold value.
%
% o low_white: Define the minimum white threshold value.
%
% o high_white: Define the maximum white threshold value.
%
% o high_black: Define the maximum black threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
const double low_black,const double low_white,const double high_white,
const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/*
Range threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel < low_black)
q[i]=(Quantum) 0;
else
if ((pixel >= low_black) && (pixel < low_white))
q[i]=ClampToQuantum(QuantumRange*
PerceptibleReciprocal(low_white-low_black)*(pixel-low_black));
else
if ((pixel >= low_white) && (pixel <= high_white))
q[i]=QuantumRange;
else
if ((pixel > high_white) && (pixel <= high_black))
q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal(
high_black-high_white)*(high_black-pixel));
else
q[i]=(Quantum) 0;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
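/*
The piecewise map implemented above, as one standalone function (a sketch
over plain doubles; the library additionally guards the divisions with
PerceptibleReciprocal): black below low_black, a rising ramp to white across
[low_black,low_white), solid white through [low_white,high_white], a falling
ramp across (high_white,high_black], and black above high_black.
*/
#if 0
static double range_threshold_rule(const double pixel,const double low_black,
  const double low_white,const double high_white,const double high_black)
{
  if ((pixel < low_black) || (pixel > high_black))
    return(0.0);
  if (pixel < low_white)
    return(QuantumRange*(pixel-low_black)/(low_white-low_black));
  if (pixel <= high_white)
    return((double) QuantumRange);
  return(QuantumRange*(high_black-pixel)/(high_black-high_white));
}
#endif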
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o thresholds: Define the threshold value(s); append "%" to interpret them
% as percentages of QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
GetPixelInfo(image,&threshold);
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
White threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel > GetPixelInfoChannel(&threshold,channel))
q[i]=QuantumRange;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
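/*
Worked example of the percent form accepted above: on a Q16 build
(QuantumRange 65535) the threshold string "60%" scales to
0.60*65535 = 39321, and any updatable channel whose value exceeds 39321 is
forced to QuantumRange.
*/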
|
LAGraph_pagerank3b.c | //------------------------------------------------------------------------------
// LAGraph_pagerank3b: pagerank using a real semiring
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
// LAGraph_pagerank3b: Alternative PageRank implementation using a real
// semiring.
//
// This algorithm follows the specification given in the GAP Benchmark Suite:
// https://arxiv.org/abs/1508.03619
// For fastest results, the input matrix should be GrB_FP32, stored in
// GxB_BY_COL format.
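// In scalar form, each pass of the iteration below computes (a sketch;
// d_out[i] is the out-degree of vertex i, df the damping factor):
//
//   pr_new[j] = (1-df)/n + df * sum over edges (i,j) of pr_old[i] / d_out[i]
//
// and stops once sum_i |pr_new[i] - pr_old[i]| drops below the tolerance.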
#include "LAGraph.h"
#define LAGRAPH_FREE_ALL { \
GrB_free(&transpose_desc); \
GrB_free(&invmask_desc); \
GrB_free(&A); \
GrB_free(&G); \
GrB_free(&grb_d_out); \
GrB_free(&importance_vec); \
GrB_free(&grb_pr); \
};
// uncomment this to see the intermediate results; lots of prints!!
//#undef NDEBUG
// uncomment this to see the timing info
#define PRINT_TIMING_INFO
GrB_Info LAGraph_pagerank3b // PageRank definition
(
GrB_Vector *result, // output: vector of PageRank scores
GrB_Matrix A_input, // binary input graph, not modified
float damping_factor, // damping factor
unsigned long itermax, // maximum number of iterations
int* iters // output: number of iterations taken
)
{
GrB_Info info;
GrB_Index n;
GrB_Descriptor invmask_desc = NULL ;
GrB_Descriptor transpose_desc = NULL ;
GrB_Vector grb_d_out = NULL ;
GrB_Matrix A = NULL ;
#ifdef PRINT_TIMING_INFO
// start the timer
double tic [2] ;
LAGraph_tic (tic) ;
#endif
GrB_Vector importance_vec = NULL ;
GrB_Vector grb_pr = NULL;
GrB_Matrix G = NULL ; // a dense row of zeros, zeros(1,n)
GrB_Index ncols ; //number of columns
LAGRAPH_OK(GrB_Matrix_ncols(&ncols , A_input));
LAGRAPH_OK(GrB_Matrix_nrows(&n, A_input));
GrB_Index nvals;
LAGRAPH_OK(GrB_Matrix_nvals(&nvals, A_input));
if (ncols != n)
{
return (GrB_DIMENSION_MISMATCH) ;
}
LAGRAPH_OK(GrB_Matrix_new (&G, GrB_FP32, n, n));
LAGRAPH_OK(GrB_Matrix_new (&A, GrB_FP32, n, n));
LAGRAPH_OK(GxB_set (A, GxB_FORMAT, GxB_BY_COL));
// G is zeros in last row
for (GrB_Index c = 0; c < n; c++){
LAGRAPH_OK(GrB_Matrix_setElement (G, 0.0, n-1, c));
}
#ifndef NDEBUG
int print_size = 5; //number of entries to print
print_size = (print_size > n)? n : print_size;
// GxB_print (G, 3) ;
#endif
// A = A_input + G;
LAGRAPH_OK(GrB_eWiseAdd (A, NULL, NULL, GrB_PLUS_FP32, A_input, G, NULL));
GrB_free (&G) ;
#ifndef NDEBUG
// GxB_print (A, 3) ;
#endif
// Create complement descriptor
LAGRAPH_OK(GrB_Descriptor_new(&invmask_desc));
LAGRAPH_OK(GrB_Descriptor_set(invmask_desc, GrB_MASK, GrB_SCMP));
// Create transpose descriptor
LAGRAPH_OK(GrB_Descriptor_new(&transpose_desc));
LAGRAPH_OK(GrB_Descriptor_set(transpose_desc, GrB_INP0, GrB_TRAN));
LAGRAPH_OK(GrB_Descriptor_set(transpose_desc, GrB_OUTP, GrB_REPLACE));
// Matrix A row sum
// Stores the outbound degrees of all vertices
LAGRAPH_OK(GrB_Vector_new(&grb_d_out, GrB_FP32, n));
LAGRAPH_OK(GrB_reduce( grb_d_out, NULL, NULL, GxB_PLUS_FP32_MONOID,
A, NULL ));
#ifndef NDEBUG
GxB_print (grb_d_out, 1) ;
// GxB_print (A, 3) ;
#endif
// Iteration
// Initialize PR vector
LAGRAPH_OK(GrB_Vector_new(&grb_pr, GrB_FP32, n));
LAGRAPH_OK(GrB_Vector_new(&importance_vec, GrB_FP32, n));
// Teleport value
const float teleport = (1 - damping_factor) / n;
float tol = 1e-4;
float rdiff = 1 ; // first iteration is always done
GrB_Type type = GrB_FP32 ;
GrB_Index *dI = NULL ;
float *d_sp= NULL ;
GrB_Index d_nvals;
GrB_Index d_n;
// d_sp <----- grb_d_out || export
LAGRAPH_OK (GxB_Vector_export (&grb_d_out, &type, &d_n, &d_nvals, &dI,
(void **) (&d_sp), NULL)) ;
// densify d_out
float *d_out = (float *) calloc(n, sizeof(float));
int nthreads = LAGraph_get_nthreads ( ) ;
nthreads = LAGRAPH_MIN (n , nthreads) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t i = 0 ; i < d_nvals; i++){
GrB_Index ind = (GrB_Index) dI[i];
d_out [ind] = d_sp [i];
}
free (d_sp);
free (dI);
#ifndef NDEBUG
for (int i = 0 ; i < print_size; i++){
printf("d_out [%d]=%ld\n", i, d_out [i]);
}
#endif
// initializing pr
float *pr = (float *) malloc (n*sizeof(float));
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int i = 0; i < n ; i++){
pr [i] = 1.0/n;
}
#ifndef NDEBUG
for (int i = 0 ; i < print_size ; i++){
printf("pr[%d]=%f\n", i, pr [i]);
}
#endif
float *oldpr = (float *) malloc (n*sizeof(float));
//initialize the dense indices
GrB_Index *I = LAGraph_malloc(n, sizeof(GrB_Index));
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index j = 0; j < n; j++){
I[j] = j;
}
#ifdef PRINT_TIMING_INFO
// stop the timer
double t1 = LAGraph_toc (tic);
printf ("\ninitialization time: %12.6e (sec)\n",t1);
LAGraph_tic (tic);
#endif
for ((*iters) = 0 ; (*iters) < itermax && rdiff > tol ; (*iters)++) {
// oldpr = pr; deep copy
//GrB_Vector_dup(&oldpr, pr);
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int i = 0; i < n ; i++){
oldpr [i] = pr [i];
}
// Importance calculation
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int i = 0 ; i < n; i++){
if (d_out [i] != 0){
pr [i] = damping_factor * pr [i] / d_out [i];
}
else{
pr [i] = 0;
}
}
#ifndef NDEBUG
for (int i = 0 ; i < print_size; i++){
printf (" pr [%d] = %f\n", i, pr [i]);
}
#endif
// importance_vec <----- pr
LAGRAPH_OK (GxB_Vector_import (&importance_vec, GrB_FP32, n, n, &I,
(void **) (&pr), NULL)) ;
#ifndef NDEBUG
printf ("after importance_vec import\n");
GxB_print (importance_vec, 2) ;
#endif
// Calculate total PR of all inbound vertices
// importance_vec = A' * importance_vec
LAGRAPH_OK(GrB_mxv( importance_vec, NULL, NULL, GxB_PLUS_TIMES_FP32,
A, importance_vec, transpose_desc ));
#ifndef NDEBUG
printf ("==============2\n");
printf ("after mxv\n");
GxB_print (importance_vec, 1) ;
#endif
GrB_Index nvals_exp;
// pr <----- importance_vec
GrB_Type ivtype;
LAGRAPH_OK (GxB_Vector_export (&importance_vec, &ivtype, &n, &nvals_exp,
&I, (void **) (&pr), NULL)) ;
// assert (nvals_exp == n );
// PageRank summarization
// Add teleport, importance_vec, and dangling_vec components together
// pr = (1-df)/n + pr
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int i = 0 ; i < n; i++){
pr [i] += teleport;
}
#ifndef NDEBUG
for (int i = 0 ; i < print_size; i++){
printf (" pr [%d] = %f\n", i, pr [i]);
}
#endif
//----------------------------------------------------------------------
// rdiff = norm (oldpr - pr, 1) = sum (abs (oldpr - pr))
//----------------------------------------------------------------------
rdiff = 0;
#pragma omp parallel for num_threads(nthreads) reduction(+:rdiff)
for (int i = 0 ; i < n; i++){
float d = (oldpr [i] - pr [i]);
d = (d > 0 ? d : -d); //abs(d)
rdiff += d;
}
#ifndef NDEBUG
printf("---------------------------iters %d rdiff=%f\n",*iters, rdiff);
#endif
}
#ifdef PRINT_TIMING_INFO
// stop the timer
double t2 = LAGraph_toc (tic);
printf ("compuatatin time: %12.6e (sec) ratio (comp/init): %f\n\n",
t2, t2/t1);
#endif
// grb_pr<----- pr || import back
LAGRAPH_OK (GxB_Vector_import (&grb_pr, GrB_FP32, n, n, &I,
(void **) (&pr), NULL)) ;
(*result) = grb_pr;
free(I);
free (oldpr);
free (d_out);
return (GrB_SUCCESS);
}
|
c_pi.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
(LICENSE file) along with this program; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
FILE: c_pi.c
VERSION: 1.0
DATE: May 2004
COMMENTS TO: sande@csi.ull.es
DESCRIPTION: Parallel implementation of PI generator using OpenMP
COMMENTS: The area under the curve y=4/(1+x*x) between 0 and 1 provides a way to compute Pi
The value of this integral can be approximated using a sum.
REFERENCES: http://en.wikipedia.org/wiki/Pi
http://nereida.deioc.ull.es/~llCoMP/examples/examples/pi/pi_description.html
BASIC PRAGMAS: parallel
USAGE: ./c_pi.par
INPUT: Default precision
OUTPUT: The value of PI
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
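/*
  The sum computed below is the midpoint rule applied to that integral:

     pi ~ w * SUM_{i=0..N-1} 4 / (1 + ((i+0.5)*w)^2),   w = 1/N

  each term being the area of one strip of width w evaluated at its
  midpoint (i+0.5)*w.
*/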
#include "OmpSCR.h"
#define DEFAULT_PREC 1000000 /* Default precision */
#define NUM_ARGS 1
#define NUM_TIMERS 1
int main(int argc, char *argv[]) {
double PI25DT = 3.141592653589793238462643;
double local, w, total_time, pi;
long i,
N; /* Precision */
int NUMTHREADS;
char *PARAM_NAMES[NUM_ARGS] = {"Precision"};
char *DEFAULTS_VALUE[NUM_ARGS] = {"1000000"};
char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};
/* Default: DEFAULT_PREC; */
NUMTHREADS = omp_get_max_threads();
OSCR_init (NUMTHREADS, "Pi generator", "Param: precision", NUM_ARGS, PARAM_NAMES, DEFAULTS_VALUE, NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, argc, argv);
N = OSCR_getarg_int(NUM_ARGS);
OSCR_timer_start(0);
w = 1.0 / N;
pi = 0.0;
/* #pragma omp parallel for default(shared) private(i, local)reduction(+:pi) schedule(static, 1) */
#pragma omp parallel for default(shared) private(i, local)reduction(+:pi)
for(i = 0; i < N; i++) {
local = (i + 0.5) * w;
pi += 4.0 / (1.0 + local * local);
}
pi *= w;
OSCR_timer_stop(0);
total_time = OSCR_timer_read(0);
OSCR_report();
printf("\n \t# THREADS INTERVAL \tTIME (secs.) \tPI \t\t\tERROR\n");
printf("\t %d \t%10ld \t%14.6lf \t%1.20f\t%g\n", NUMTHREADS, N, total_time, pi, PI25DT-pi);
return 0;
}
/*
* vim:ts=2:sw=2:
*/
|
orphaned-directives.c | #include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
static double a[1000];
static void init(void)
{
int i;
#pragma omp for
for (i=0;i<1000;i++)
{
a[i]=(double)i/2.0;
}
}
int main(void){
#pragma omp parallel
{
init();
}
return 0;
}
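/*
The "#pragma omp for" in init() is an orphaned directive: it is not
lexically enclosed in a parallel construct, so it binds to whichever
parallel region is active when init() is called. Invoked from the parallel
block in main() it shares the 1000 iterations among the team; invoked
outside any parallel region it simply runs the loop sequentially.
*/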
|
mm-omp.c | /**
*
* Matrix Multiplication - Shared-memory (OpenMP)
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>
int size;
int threads;
typedef struct
{
float ** element;
} matrix;
long long wall_clock_time()
{
#ifdef LINUX
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll);
#else
struct timeval tv;
gettimeofday(&tv, NULL);
return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
void allocate_matrix(matrix* m)
{
int i;
// allocate array for all the rows
m->element = (float**)malloc(sizeof(float*) * size);
if (m->element == NULL)
{
fprintf(stderr, "Out of memory\n");
exit(1);
}
// allocate an array for each row of the matrix
for (i = 0; i < size; i++)
{
m->element[i] = (float*)malloc(sizeof(float) * size);
if (m->element[i] == NULL)
{
fprintf(stderr, "Out of memory\n");
exit(1);
}
}
}
/**
* Free the memory allocated to a matrix.
**/
void free_matrix(matrix* m) {
int i;
for (i = 0; i < size; i++)
{
free(m->element[i]);
}
free(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
void init_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = rand() % 10;
}
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
void init_matrix_zero(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = 0.0;
}
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
void mm(matrix a, matrix b, matrix result)
{
int i, j, k;
// Parallelize the multiplication
// Each thread will work on one iteration of the outer-most loop
// Variables are shared among threads (a, b, result)
// and each thread has its own private copy (i, j, k)
#pragma omp parallel for shared(a, b, result) private (i, j, k)
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
for(k = 0; k < size; k++)
result.element[i][j] += a.element[i][k] * b.element[k][j];
}
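/*
A variant of the same loop nest (a sketch, not used here): collapsing the
two outer loops hands OpenMP size*size chunks to balance instead of size,
which can help when size is small relative to the thread count.
*/
#if 0
void mm_collapsed(matrix a, matrix b, matrix result)
{
  int i, j, k;
  /* i and j are the collapsed loop iterators and thus private by default */
  #pragma omp parallel for collapse(2) shared(a, b, result) private(k)
  for (i = 0; i < size; i++)
    for (j = 0; j < size; j++)
      for (k = 0; k < size; k++)
        result.element[i][j] += a.element[i][k] * b.element[k][j];
}
#endif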
void print_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
{
printf("row %4d: ", i);
for (j = 0; j < size; j++)
printf("%6.2f ", m.element[i][j]);
printf("\n");
}
}
void work()
{
matrix a, b, result;
long long before, after;
// Allocate memory for matrices
allocate_matrix(&a);
allocate_matrix(&b);
allocate_matrix(&result);
// Initialize matrix elements
init_matrix(a);
init_matrix(b);
init_matrix_zero(result);
// Perform parallel matrix multiplication
before = wall_clock_time();
mm(a, b, result);
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication took %1.2f seconds\n", ((float)(after - before))/1000000000);
// Print the result matrix
// print_matrix(result);
}
int main(int argc, char ** argv)
{
srand(0);
printf("Usage: %s <size> <threads>\n", argv[0]);
if (argc >= 2)
size = atoi(argv[1]);
else
size = 1024;
if (argc >= 3)
threads = atoi(argv[2]);
else
threads = -1;
// Multiply the matrices
if (threads != -1)
{
omp_set_num_threads(threads);
}
#pragma omp parallel
{
threads = omp_get_num_threads();
}
printf("Matrix multiplication of size %d using %d threads\n", size, threads);
work();
return 0;
}
|
firstlastprivate.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main() {
int i, n = 7;
int a[n], suma=0;
for (i=0; i<n; i++)
a[i] = i;
#pragma omp parallel for firstprivate(suma) lastprivate(suma)
for (i=0; i<n; i++){
suma = suma + a[i];
printf(" thread %d suma a[%d] suma=%d \n",omp_get_thread_num(),i,suma);
}
printf("\nFuera de la construcción parallel suma=%d\n",suma);
}
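/*
Note: lastprivate(suma) copies back the private value from the thread that
executes the last iteration (i == n-1), so the suma printed after the loop
is only that thread's partial sum, not the full total 0+1+...+6 = 21. A
sketch of the reduction form that does yield the total:
*/
#if 0
#pragma omp parallel for reduction(+:suma)
for (i=0; i<n; i++)
  suma = suma + a[i];
/* here suma == 21 for n == 7 */
#endif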
|
DRB050-functionparameter-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Arrays passed as function parameters
*/
void foo1(double o1[], double c[], int len)
{
int i ;
#pragma omp parallel for schedule(dynamic)
for (i = 0; i < len; ++i) {
double volnew_o8 = 0.5 * c[i];
o1[i] = volnew_o8;
}
}
double o1[100];
double c[100];
int main()
{
foo1 (o1, c, 100);
return 0;
}
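/*
The loop in foo1 is race-free: iteration i writes only o1[i], and
volnew_o8 is declared inside the loop body, so each iteration gets its own
copy. For contrast, a sketch (not part of the benchmark) of the racy
variant, with the temporary hoisted out of the loop and therefore shared:
*/
#if 0
void foo1_racy(double o1[], double c[], int len)
{
  int i;
  double volnew_o8; /* shared across threads: a data race */
  #pragma omp parallel for schedule(dynamic)
  for (i = 0; i < len; ++i) {
    volnew_o8 = 0.5 * c[i];
    o1[i] = volnew_o8;
  }
}
#endif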
|
dynmat.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdlib.h>
#include "dynmat.h"
#define PI 3.14159265358979323846
static void get_dynmat_ij(double *dynamical_matrix,
const long num_patom,
const long num_satom,
const double *fc,
const double q[3],
const double (*svecs)[27][3],
const long *multi,
const double *mass,
const long *s2p_map,
const long *p2s_map,
const double (*charge_sum)[3][3],
const long i,
const long j);
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const long num_patom,
const long num_satom,
const double *fc,
const double q[3],
const double (*svecs)[27][3],
const long *multi,
const long *p2s_map,
const double (*charge_sum)[3][3],
const long i,
const long j,
const long k);
static double get_dielectric_part(const double q_cart[3],
const double dielectric[3][3]);
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
const double (*G_list)[3], /* [num_G, 3] */
const long num_G,
const long num_patom,
const double q_cart[3],
const double *q_direction_cart,
const double dielectric[3][3],
const double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance);
static void make_Hermitian(double *mat, const long num_band);
static void multiply_borns(double *dd,
const double *dd_in,
const long num_patom,
const double (*born)[3][3]);
long dym_get_dynamical_matrix_at_q(double *dynamical_matrix,
const long num_patom,
const long num_satom,
const double *fc,
const double q[3],
const double (*svecs)[27][3],
const long *multi,
const double *mass,
const long *s2p_map,
const long *p2s_map,
const double (*charge_sum)[3][3],
const long with_openmp)
{
long i, j, ij;
if (with_openmp) {
#pragma omp parallel for
for (ij = 0; ij < num_patom * num_patom ; ij++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
ij / num_patom, /* i */
ij % num_patom); /* j */
}
} else {
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
i,
j);
}
}
}
make_Hermitian(dynamical_matrix, num_patom * 3);
return 0;
}
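/* The flattened ij loop above is the usual idiom for parallelizing two
   nested loops with a single "omp parallel for": the pair (i,j) is
   recovered as i = ij / num_patom and j = ij % num_patom, so all
   num_patom^2 pairs form one iteration space to divide among threads. */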
void dym_get_recip_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */
const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
const double (*G_list)[3], /* [num_G, 3] */
const long num_G,
const long num_patom,
const double q_cart[3],
const double *q_direction_cart, /* must be pointer */
const double (*born)[3][3],
const double dielectric[3][3],
const double (*pos)[3], /* [num_patom, 3] */
const double factor, /* 4pi/V*unit-conv */
const double lambda,
const double tolerance)
{
long i, k, l, adrs, adrs_sum;
double *dd_tmp;
dd_tmp = NULL;
dd_tmp = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd[i] = 0;
dd_tmp[i] = 0;
}
get_KK(dd_tmp,
G_list,
num_G,
num_patom,
q_cart,
q_direction_cart,
dielectric,
pos,
lambda,
tolerance);
multiply_borns(dd, dd_tmp, num_patom, born);
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l;
adrs_sum = i * 9 + k * 3 + l;
dd[adrs * 2] -= dd_q0[adrs_sum * 2];
dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1];
}
}
}
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd[i] *= factor;
}
/* This may not be necessary. */
/* make_Hermitian(dd, num_patom * 3); */
free(dd_tmp);
dd_tmp = NULL;
}
void dym_get_recip_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */
const double (*G_list)[3], /* [num_G, 3] */
const long num_G,
const long num_patom,
const double (*born)[3][3],
const double dielectric[3][3],
const double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance)
{
long i, j, k, l, adrs_tmp, adrs, adrsT;
double zero_vec[3];
double *dd_tmp1, *dd_tmp2;
dd_tmp1 = NULL;
dd_tmp1 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
dd_tmp2 = NULL;
dd_tmp2 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd_tmp1[i] = 0;
dd_tmp2[i] = 0;
}
zero_vec[0] = 0;
zero_vec[1] = 0;
zero_vec[2] = 0;
get_KK(dd_tmp1,
G_list,
num_G,
num_patom,
zero_vec,
NULL,
dielectric,
pos,
lambda,
tolerance);
multiply_borns(dd_tmp2, dd_tmp1, num_patom, born);
for (i = 0; i < num_patom * 18; i++) {
dd_q0[i] = 0;
}
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * 9 + k * 3 + l;
for (j = 0; j < num_patom; j++) {
adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ;
dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2];
dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1];
}
}
}
}
/* Summation over another atomic index */
/* for (j = 0; j < num_patom; j++) { */
/* for (k = 0; k < 3; k++) { /\* alpha *\/ */
/* for (l = 0; l < 3; l++) { /\* beta *\/ */
/* adrs = j * 9 + k * 3 + l; */
/* for (i = 0; i < num_patom; i++) { */
/* adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ; */
/* dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; */
/* dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; */
/* } */
/* } */
/* } */
/* } */
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * 9 + k * 3 + l;
adrsT = i * 9 + l * 3 + k;
dd_q0[adrs * 2] += dd_q0[adrsT * 2];
dd_q0[adrs * 2] /= 2;
dd_q0[adrsT * 2] = dd_q0[adrs * 2];
dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1];
dd_q0[adrs * 2 + 1] /= 2;
dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1];
}
}
}
free(dd_tmp1);
dd_tmp1 = NULL;
free(dd_tmp2);
dd_tmp2 = NULL;
}
void dym_get_charge_sum(double (*charge_sum)[3][3],
const long num_patom,
const double factor, /* 4pi/V*unit-conv and denominator */
const double q_cart[3],
const double (*born)[3][3])
{
long i, j, k, a, b;
double (*q_born)[3];
q_born = (double (*)[3]) malloc(sizeof(double[3]) * num_patom);
for (i = 0; i < num_patom; i++) {
for (j = 0; j < 3; j++) {
q_born[i][j] = 0;
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < 3; j++) {
for (k = 0; k < 3; k++) {
q_born[i][j] += q_cart[k] * born[i][k][j];
}
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
charge_sum[i * num_patom + j][a][b] =
q_born[i][a] * q_born[j][b] * factor;
}
}
}
}
free(q_born);
q_born = NULL;
}
/* fc[num_patom, num_satom, 3, 3] */
/* dm[num_comm_points, num_patom * 3, num_patom *3] */
/* comm_points[num_satom, num_patom, 27, 3] */
/* shortest_vectors[num_satom, num_patom, 27, 3] */
/* multiplicities[num_satom, num_patom] */
void dym_transform_dynmat_to_fc(double *fc,
const double *dm,
const double (*comm_points)[3],
const double (*shortest_vectors)[27][3],
const long *multiplicities,
const double *masses,
const long *s2pp_map,
const long *fc_index_map,
const long num_patom,
const long num_satom)
{
long i, j, k, l, m, N, adrs, multi;
double coef, phase, cos_phase, sin_phase;
N = num_satom / num_patom;
for (i = 0; i < num_patom * num_satom * 9; i++) {
fc[i] = 0;
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_satom; j++) {
coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N;
for (k = 0; k < N; k++) {
cos_phase = 0;
sin_phase = 0;
multi = multiplicities[j * num_patom + i];
for (l = 0; l < multi; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase -= comm_points[k][m] *
shortest_vectors[j * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI);
sin_phase += sin(phase * 2 * PI);
}
cos_phase /= multi;
sin_phase /= multi;
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
adrs = k * num_patom * num_patom * 18 + i * num_patom * 18 +
l * num_patom * 6 + s2pp_map[j] * 6 + m * 2;
fc[fc_index_map[i] * num_satom * 9 + j * 9 + l * 3 + m] +=
(dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) * coef;
}
}
}
}
}
}
static void get_dynmat_ij(double *dynamical_matrix,
const long num_patom,
const long num_satom,
const double *fc,
const double q[3],
const double (*svecs)[27][3],
const long *multi,
const double *mass,
const long *s2p_map,
const long *p2s_map,
const double (*charge_sum)[3][3],
const long i,
const long j)
{
long k, l, adrs;
double mass_sqrt;
double dm_real[3][3], dm_imag[3][3];
mass_sqrt = sqrt(mass[i] * mass[j]);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
dm_real[k][l] = 0;
dm_imag[k][l] = 0;
}
}
for (k = 0; k < num_satom; k++) { /* Lattice points of right index of fc */
if (s2p_map[k] != p2s_map[j]) {
continue;
}
get_dm(dm_real,
dm_imag,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
p2s_map,
charge_sum,
i,
j,
k);
}
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = (i * 3 + k) * num_patom * 3 + j * 3 + l;
dynamical_matrix[adrs * 2] = dm_real[k][l] / mass_sqrt;
dynamical_matrix[adrs * 2 + 1] = dm_imag[k][l] / mass_sqrt;
}
}
}
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const long num_patom,
const long num_satom,
const double *fc,
const double q[3],
const double (*svecs)[27][3],
const long *multi,
const long *p2s_map,
const double (*charge_sum)[3][3],
const long i,
const long j,
const long k)
{
long l, m;
double phase, cos_phase, sin_phase, fc_elem;
cos_phase = 0;
sin_phase = 0;
for (l = 0; l < multi[k * num_patom + i]; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase += q[m] * svecs[k * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI) / multi[k * num_patom + i];
sin_phase += sin(phase * 2 * PI) / multi[k * num_patom + i];
}
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
if (charge_sum) {
fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] +
charge_sum[i * num_patom + j][l][m]);
} else {
fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m];
}
dm_real[l][m] += fc_elem * cos_phase;
dm_imag[l][m] += fc_elem * sin_phase;
}
}
}
static double get_dielectric_part(const double q_cart[3],
const double dielectric[3][3])
{
long i, j;
double x[3];
double sum;
for (i = 0; i < 3; i++) {
x[i] = 0;
for (j = 0; j < 3; j++) {
x[i] += dielectric[i][j] * q_cart[j];
}
}
sum = 0;
for (i = 0; i < 3; i++) {
sum += q_cart[i] * x[i];
}
return sum;
}
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
const double (*G_list)[3], /* [num_G, 3] */
const long num_G,
const long num_patom,
const double q_cart[3],
const double *q_direction_cart,
const double dielectric[3][3],
const double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance)
{
long i, j, k, l, g, adrs;
double q_K[3];
double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2;
double KK[3][3];
L2 = 4 * lambda * lambda;
/* sum over K = G + q and over G (i.e. q=0) */
/* q_direction has values for summation over K at Gamma point. */
/* q_direction is NULL for summation over G */
for (g = 0; g < num_G; g++) {
norm = 0;
for (i = 0; i < 3; i++) {
q_K[i] = G_list[g][i] + q_cart[i];
norm += q_K[i] * q_K[i];
}
if (sqrt(norm) < tolerance) {
if (!q_direction_cart) {
continue;
} else {
dielectric_part = get_dielectric_part(q_direction_cart, dielectric);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] =
q_direction_cart[i] * q_direction_cart[j] / dielectric_part;
}
}
}
} else {
dielectric_part = get_dielectric_part(q_K, dielectric);
exp_damp = exp(-dielectric_part / L2);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp;
}
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
phase = 0;
for (k = 0; k < 3; k++) {
/* For D-type dynamical matrix */
/* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */
/* For C-type dynamical matrix */
phase += (pos[i][k] - pos[j][k]) * G_list[g][k];
}
phase *= 2 * PI;
cos_phase = cos(phase);
sin_phase = sin(phase);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
dd_part[adrs * 2] += KK[k][l] * cos_phase;
dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase;
}
}
}
}
}
}
static void make_Hermitian(double *mat, const long num_band)
{
long i, j, adrs, adrsT;
for (i = 0; i < num_band; i++) {
for (j = i; j < num_band; j++) {
adrs = i * num_band + j * 1;
adrs *= 2;
adrsT = j * num_band + i * 1;
adrsT *= 2;
/* real part */
mat[adrs] += mat[adrsT];
mat[adrs] /= 2;
/* imaginary part */
mat[adrs + 1] -= mat[adrsT+ 1];
mat[adrs + 1] /= 2;
/* store */
mat[adrsT] = mat[adrs];
mat[adrsT + 1] = -mat[adrs + 1];
}
}
}
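/* make_Hermitian replaces M by (M + M^H) / 2: for each pair (i,j),(j,i)
   the real parts are averaged and the imaginary parts anti-symmetrized,
   so the result is exactly Hermitian. */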
static void multiply_borns(double *dd,
const double *dd_in,
const long num_patom,
const double (*born)[3][3])
{
long i, j, k, l, m, n, adrs, adrs_in;
double zz;
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
for (m = 0; m < 3; m++) { /* alpha' */
for (n = 0; n < 3; n++) { /* beta' */
adrs_in = i * num_patom * 9 + m * num_patom * 3 + j * 3 + n ;
zz = born[i][m][k] * born[j][n][l];
dd[adrs * 2] += dd_in[adrs_in * 2] * zz;
dd[adrs * 2 + 1] += dd_in[adrs_in * 2 + 1] * zz;
}
}
}
}
}
}
}
|
GB_binop__pow_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16)
// C=scalar+B GB (_bind1st__pow_uint16)
// C=scalar+B' GB (_bind1st_tran__pow_uint16)
// C=A+scalar GB (_bind2nd__pow_uint16)
// C=A'+scalar GB (_bind2nd_tran__pow_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_pow_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_uint16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pow_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pow_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__pow_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__pow_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__pow_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_pow_uint16 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__pow_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_pow_uint16 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
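//------------------------------------------------------------------------------
// A standalone sketch of the bind2nd pattern above (illustrative only: the
// helper ipow_u16 stands in for the library's GB_pow_uint16, and the bitmap
// test is omitted by assuming a full array):
//------------------------------------------------------------------------------
#if 0
static inline uint16_t ipow_u16 (uint16_t x, uint16_t y)
{
    uint16_t z = 1 ;
    while (y-- > 0) z = (uint16_t) (z * x) ;   // wraps modulo 2^16
    return (z) ;
}

void bind2nd_demo (uint16_t *Cx, const uint16_t *Ax, uint16_t y, int64_t anz)
{
    int64_t p ;
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = ipow_u16 (Ax [p], y) ;   // cij = aij ^ y, entrywise
    }
}
#endif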
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint16 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__pow_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint16 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *,const MapMode,
const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->disk_mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->file=(-1);
cache_info->id=GetMagickThreadId();
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
cache_info->semaphore=AcquireSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AcquireSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickCoreSignature;
return((Cache) cache_info);
}
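/*
  A minimal lifetime sketch (not part of MagickCore): a cache acquired with
  AcquirePixelCache() must eventually be released with DestroyPixelCache(),
  which drops the reference count and frees the cache when it reaches zero.
  Both functions are MagickPrivate; illustrative only, hence disabled.
*/
#if 0
static void ExercisePixelCacheLifetime(void)
{
  Cache
    cache;

  cache=AcquirePixelCache(0);  /* 0 threads is clamped to at least 1 */
  cache=DestroyPixelCache(cache);  /* returns (Cache) NULL */
}
#endif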
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**magick_restrict nexus_info;
register ssize_t
i;
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
sizeof(**nexus_info));
if (nexus_info[0] == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(nexus_info[0],0,number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
nexus_info[i]=(&nexus_info[0][i]);
nexus_info[i]->signature=MagickCoreSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*length=0;
if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
return((void *) NULL);
*length=(size_t) cache_info->length;
return(cache_info->pixels);
}
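/*
  A usage sketch (not part of MagickCore): AcquirePixelCachePixels() exposes
  the backing pixel buffer only when the cache is memory- or map-based, so
  callers must handle the NULL/zero-length case. Illustrative only.
*/
#if 0
static void ReportPixelCacheBytes(const Image *image,ExceptionInfo *exception)
{
  size_t
    length;

  void
    *pixels;

  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels == (void *) NULL)
    return;  /* disk, distributed, or ping cache: no direct buffer */
  (void) fprintf(stderr,"pixel cache resident bytes: %.20g\n",(double) length);
}
#endif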
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
if (cache_semaphore == (SemaphoreInfo *) NULL)
cache_semaphore=AcquireSemaphoreInfo();
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% void CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
/* no op-- nothing to destroy */
RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
number_pixels;
NexusInfo
**magick_restrict image_nexus;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
n;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->channels & WriteMaskChannel) == 0)
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,image_nexus[0],
exception);
q=nexus_info->pixels;
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (n=0; n < (ssize_t) number_pixels; n++)
{
double
mask_alpha;
register ssize_t
i;
if (p == (Quantum *) NULL)
break;
mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
if (fabs(mask_alpha) >= MagickEpsilon)
{
for (i=0; i < (ssize_t) image->number_channels; i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
GetPixelAlpha(image,p),(double) q[i],(double)
GetPixelAlpha(image,q)));
}
SetPixelAlpha(image,GetPixelAlpha(image,p),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
if (n < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
CacheInfo
*magick_restrict clone_info;
const CacheInfo
*magick_restrict cache_info;
assert(cache != NULL);
cache_info=(const CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
return((Cache) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
CacheInfo
*magick_restrict cache_info,
*magick_restrict source_info;
assert(clone != (Cache) NULL);
source_info=(CacheInfo *) clone;
assert(source_info->signature == MagickCoreSignature);
if (source_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
source_info->filename);
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
source_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
MagickSizeType
extent;
size_t
quantum;
ssize_t
count;
struct stat
file_stats;
unsigned char
*buffer;
/*
Clone pixel cache on disk with identical morphology.
*/
if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
(OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
return(MagickFalse);
if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
(lseek(clone_info->file,0,SEEK_SET) < 0))
return(MagickFalse);
quantum=(size_t) MagickMaxBufferExtent;
if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
extent=0;
while ((count=read(cache_info->file,buffer,quantum)) > 0)
{
ssize_t
number_bytes;
number_bytes=write(clone_info->file,buffer,(size_t) count);
if (number_bytes != count)
break;
extent+=number_bytes;
}
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
if (extent != cache_info->length)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
num_threads((multithreaded) == 0 ? 1 : \
(((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
(((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))
MagickBooleanType
optimize,
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
if ((cache_info->storage_class == clone_info->storage_class) &&
(cache_info->colorspace == clone_info->colorspace) &&
(cache_info->alpha_trait == clone_info->alpha_trait) &&
(cache_info->channels == clone_info->channels) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
(cache_info->metacontent_extent == clone_info->metacontent_extent))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->number_channels*cache_info->columns*cache_info->rows*
sizeof(*cache_info->pixels));
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
(void) memcpy(clone_info->metacontent,cache_info->metacontent,
cache_info->columns*cache_info->rows*
clone_info->metacontent_extent*sizeof(unsigned char));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
optimize=(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
MagickTrue : MagickFalse;
length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
clone_info->number_channels*clone_info->columns);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
RectangleInfo
region;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickFalse,
cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickFalse,
clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
(void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
if (optimize != MagickFalse)
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
sizeof(Quantum));
else
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
/*
Mismatched pixel channel map.
*/
p=cache_nexus[id]->pixels;
q=clone_nexus[id]->pixels;
for (x=0; x < (ssize_t) cache_info->columns; x++)
{
register ssize_t
i;
if (x == (ssize_t) clone_info->columns)
break;
for (i=0; i < (ssize_t) clone_info->number_channels; i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=clone_info->channel_map[i].channel;
traits=cache_info->channel_map[channel].traits;
if (traits != UndefinedPixelTrait)
*q=*(p+cache_info->channel_map[channel].offset);
q++;
}
p+=cache_info->number_channels;
}
}
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
{
/*
Clone metacontent.
*/
length=(size_t) MagickMin(cache_info->metacontent_extent,
clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickFalse,
cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
MagickFalse,clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
if ((clone_nexus[id]->metacontent != (void *) NULL) &&
(cache_nexus[id]->metacontent != (void *) NULL))
(void) memcpy(clone_nexus[id]->metacontent,
cache_nexus[id]->metacontent,length*sizeof(unsigned char));
status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MagickPathExtent];
(void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->cache != (void *) NULL)
image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
{
cache_info->methods.destroy_pixel_handler(image);
return;
}
image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
int
status;
status=(-1);
if (cache_info->file != -1)
{
status=close(cache_info->file);
cache_info->file=(-1);
RelinquishMagickResource(FileResource,1);
}
return(status == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
switch (cache_info->type)
{
case MemoryCache:
{
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (cache_info->opencl != (MagickCLCacheInfo) NULL)
{
cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
MagickTrue);
cache_info->pixels=(Quantum *) NULL;
break;
}
#endif
if (cache_info->mapped == MagickFalse)
cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
cache_info->pixels);
else
(void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
RelinquishMagickResource(MemoryResource,cache_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
cache_info->pixels=(Quantum *) NULL;
if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
(void) RelinquishUniqueFileResource(cache_info->cache_filename);
*cache_info->cache_filename='\0';
RelinquishMagickResource(MapResource,cache_info->length);
}
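    /* fall through: a map cache also holds an open disk file to release */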
case DiskCache:
{
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
(void) RelinquishUniqueFileResource(cache_info->cache_filename);
*cache_info->cache_filename='\0';
RelinquishMagickResource(DiskResource,cache_info->length);
break;
}
case DistributedCache:
{
*cache_info->cache_filename='\0';
(void) RelinquishDistributePixelCache((DistributeCacheInfo *)
cache_info->server_info);
break;
}
default:
break;
}
cache_info->type=UndefinedCache;
cache_info->mapped=MagickFalse;
cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
LockSemaphoreInfo(cache_info->semaphore);
cache_info->reference_count--;
if (cache_info->reference_count != 0)
{
UnlockSemaphoreInfo(cache_info->semaphore);
return((Cache) NULL);
}
UnlockSemaphoreInfo(cache_info->semaphore);
if (cache_info->debug != MagickFalse)
{
char
message[MagickPathExtent];
(void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
cache_info->filename);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
RelinquishPixelCachePixels(cache_info);
if (cache_info->server_info != (DistributeCacheInfo *) NULL)
cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
cache_info->server_info);
if (cache_info->nexus_info != (NexusInfo **) NULL)
cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
cache_info->number_threads);
if (cache_info->random_info != (RandomInfo *) NULL)
cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
RelinquishSemaphoreInfo(&cache_info->file_semaphore);
if (cache_info->semaphore != (SemaphoreInfo *) NULL)
RelinquishSemaphoreInfo(&cache_info->semaphore);
cache_info->signature=(~MagickCoreSignature);
cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
cache=(Cache) NULL;
return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
if (nexus_info->mapped == MagickFalse)
(void) RelinquishAlignedMemory(nexus_info->cache);
else
(void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
nexus_info->cache=(Quantum *) NULL;
nexus_info->pixels=(Quantum *) NULL;
nexus_info->metacontent=(void *) NULL;
nexus_info->length=0;
nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
const size_t number_threads)
{
register ssize_t
i;
assert(nexus_info != (NexusInfo **) NULL);
for (i=0; i < (ssize_t) number_threads; i++)
{
if (nexus_info[i]->cache != (Quantum *) NULL)
RelinquishCacheNexusPixels(nexus_info[i]);
nexus_info[i]->signature=(~MagickCoreSignature);
}
nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% to the last call to QueueAuthenticPixels() or GetAuthenticPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_authentic_metacontent_from_handler !=
(GetAuthenticMetacontentFromHandler) NULL)
{
void
*metacontent;
metacontent=cache_info->methods.
get_authentic_metacontent_from_handler(image);
return(metacontent);
}
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
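/*
  A usage sketch (not part of MagickCore): metacontent travels with the pixel
  region, so fetch it only after QueueAuthenticPixels() or
  GetAuthenticPixels() and sync the region afterwards. Illustrative only.
*/
#if 0
static MagickBooleanType ZeroFirstRowMetacontent(Image *image,
  ExceptionInfo *exception)
{
  Quantum
    *q;

  void
    *metacontent;

  q=QueueAuthenticPixels(image,0,0,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  metacontent=GetAuthenticMetacontent(image);
  if (metacontent != (void *) NULL)
    (void) memset(metacontent,0,image->columns*image->metacontent_extent);
  return(SyncAuthenticPixels(image,exception));
}
#endif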
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
MagickCLDevice device,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(device != (const MagickCLDevice) NULL);
cache_info=(CacheInfo *) image->cache;
if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
{
SyncImagePixelCache((Image *) image,exception);
cache_info=(CacheInfo *) image->cache;
}
if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
return((cl_mem) NULL);
LockSemaphoreInfo(cache_info->semaphore);
if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
(cache_info->opencl->device->context != device->context))
cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
if (cache_info->opencl == (MagickCLCacheInfo) NULL)
{
assert(cache_info->pixels != (Quantum *) NULL);
cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
cache_info->length);
}
if (cache_info->opencl != (MagickCLCacheInfo) NULL)
RetainOpenCLMemObject(cache_info->opencl->buffer);
UnlockSemaphoreInfo(cache_info->semaphore);
if (cache_info->opencl == (MagickCLCacheInfo) NULL)
return((cl_mem) NULL);
assert(cache_info->opencl->pixels == cache_info->pixels);
return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
Quantum
*magick_restrict pixels;
/*
Transfer pixels from the cache.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
nexus_info,exception);
if (pixels == (Quantum *) NULL)
return((Quantum *) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(pixels);
if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
return((Quantum *) NULL);
if (cache_info->metacontent_extent != 0)
if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
return((Quantum *) NULL);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_authentic_pixels_from_handler !=
(GetAuthenticPixelsFromHandler) NULL)
return(cache_info->methods.get_authentic_pixels_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent, call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_authentic_pixels_handler !=
(GetAuthenticPixelsHandler) NULL)
{
pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
rows,exception);
return(pixels);
}
assert(id < (int) cache_info->number_threads);
pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
cache_info->nexus_info[id],exception);
return(pixels);
}
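/*
  A minimal sketch (not part of MagickCore) of the read/modify/sync cycle
  described above, assuming a valid image and exception. Illustrative only.
*/
#if 0
static MagickBooleanType BrightenFirstRow(Image *image,
  ExceptionInfo *exception)
{
  register Quantum
    *q;

  register ssize_t
    x;

  q=GetAuthenticPixels(image,0,0,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    SetPixelRed(image,ClampToQuantum((double) GetPixelRed(image,q)+
      QuantumRange/10.0),q);
    q+=GetPixelChannels(image);
  }
  return(SyncAuthenticPixels(image,exception));  /* persist the changes */
}
#endif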
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return((Quantum *) NULL);
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
cache_info->nexus_info[id],exception);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the last
% call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
const Image *magick_restrict image)
{
const CacheInfo
*magick_restrict cache_info;
const PixelChannelMap
*magick_restrict p,
*magick_restrict q;
/*
Does the image match the pixel cache morphology?
*/
cache_info=(CacheInfo *) image->cache;
p=image->channel_map;
q=cache_info->channel_map;
if ((image->storage_class != cache_info->storage_class) ||
(image->colorspace != cache_info->colorspace) ||
(image->alpha_trait != cache_info->alpha_trait) ||
(image->channels != cache_info->channels) ||
(image->columns != cache_info->columns) ||
(image->rows != cache_info->rows) ||
(image->number_channels != cache_info->number_channels) ||
(memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
(image->metacontent_extent != cache_info->metacontent_extent) ||
(cache_info->nexus_info == (NexusInfo **) NULL))
return(MagickFalse);
return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickBooleanType
destroy,
status;
static MagickSizeType
cache_timelimit = MagickResourceInfinity,
cpu_throttle = MagickResourceInfinity,
cycles = 0;
status=MagickTrue;
if (cpu_throttle == MagickResourceInfinity)
cpu_throttle=GetMagickResourceLimit(ThrottleResource);
if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
MagickDelay(cpu_throttle);
if (cache_epoch == 0)
{
/*
Set the expire time in seconds.
*/
cache_timelimit=GetMagickResourceLimit(TimeResource);
cache_epoch=time((time_t *) NULL);
}
if ((cache_timelimit != MagickResourceInfinity) &&
((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
{
#if defined(ECANCELED)
errno=ECANCELED;
#endif
cache_info=(CacheInfo *) image->cache;
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
}
LockSemaphoreInfo(image->semaphore);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
destroy=MagickFalse;
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
LockSemaphoreInfo(cache_info->semaphore);
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
CacheInfo
*clone_info;
Image
clone_image;
/*
Clone pixel cache.
*/
clone_image=(*image);
clone_image.semaphore=AcquireSemaphoreInfo();
clone_image.reference_count=1;
clone_image.cache=ClonePixelCache(cache_info);
clone_info=(CacheInfo *) clone_image.cache;
status=OpenPixelCache(&clone_image,IOMode,exception);
if (status == MagickFalse)
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
else
{
if (clone != MagickFalse)
status=ClonePixelCacheRepository(clone_info,cache_info,
exception);
if (status == MagickFalse)
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
else
{
destroy=MagickTrue;
image->cache=clone_info;
}
}
RelinquishSemaphoreInfo(&clone_image.semaphore);
}
UnlockSemaphoreInfo(cache_info->semaphore);
}
if (destroy != MagickFalse)
cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
if (status != MagickFalse)
{
/*
Ensure the image matches the pixel cache morphology.
*/
image->type=UndefinedType;
if (ValidatePixelCacheMorphology(image) == MagickFalse)
{
status=OpenPixelCache(image,IOMode,exception);
cache_info=(CacheInfo *) image->cache;
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
}
}
UnlockSemaphoreInfo(image->semaphore);
if (status == MagickFalse)
return((Cache) NULL);
return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->type);
}
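/*
  A usage sketch (not part of MagickCore): the cache type reveals whether
  pixel access will touch memory, a memory map, or disk. Illustrative only.
*/
#if 0
static MagickBooleanType PixelCacheIsResident(const Image *image)
{
  CacheType
    type;

  type=GetImagePixelCacheType(image);
  return((type == MemoryCache) || (type == MapCache) ? MagickTrue :
    MagickFalse);
}
#endif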
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
const Quantum *source,Quantum *destination)
{
register ssize_t
i;
if (source == (const Quantum *) NULL)
{
destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
destination[GreenPixelChannel]=ClampToQuantum(
image->background_color.green);
destination[BluePixelChannel]=ClampToQuantum(
image->background_color.blue);
destination[BlackPixelChannel]=ClampToQuantum(
image->background_color.black);
destination[AlphaPixelChannel]=ClampToQuantum(
image->background_color.alpha);
return(MagickFalse);
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
destination[channel]=source[i];
}
return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
register Quantum
*magick_restrict q;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
(void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL)
return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception));
q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
return(CopyPixel(image,q,pixel));
}
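/*
  A usage sketch (not part of MagickCore): on failure the pixel buffer holds
  the image background color and MagickFalse is returned, so callers should
  test the status rather than the pixel values. Illustrative only.
*/
#if 0
static void PeekAuthenticPixel(Image *image,ExceptionInfo *exception)
{
  Quantum
    pixel[MaxPixelChannels];

  if (GetOneAuthenticPixel(image,0,0,pixel,exception) == MagickFalse)
    return;  /* pixel[] holds the background color */
  /* pixel[] now holds the authentic channel values at (0,0) */
}
#endif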
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
(void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
exception);
return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const Quantum
*p;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
(void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
if (cache_info->methods.get_one_virtual_pixel_from_handler !=
(GetOneVirtualPixelFromHandler) NULL)
return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
assert(id < (int) cache_info->number_threads);
p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
1UL,1UL,cache_info->nexus_info[id],exception);
return(CopyPixel(image,p,pixel));
}
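/*
  A usage sketch (not part of MagickCore): virtual pixels are defined even
  off-canvas; the image's virtual pixel method decides what is synthesized
  for out-of-bounds coordinates such as (-1,-1). Illustrative only.
*/
#if 0
static MagickBooleanType PeekOffCanvasPixel(const Image *image,
  ExceptionInfo *exception)
{
  Quantum
    pixel[MaxPixelChannels];

  return(GetOneVirtualPixel(image,-1,-1,pixel,exception));
}
#endif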
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
Quantum *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const Quantum
*p;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
(void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
cache_info->nexus_info[id],exception);
return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
% MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
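% A minimal usage sketch (illustrative only; the coordinates and virtual
% pixel method here are arbitrary):
%
%     PixelInfo
%       pixel;
%
%     if (GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,-1,-1,&pixel,
%           exception) != MagickFalse)
%       { /* inspect pixel.red, pixel.green, pixel.blue, ... */ }
%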
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
PixelInfo *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
GetPixelInfo(image,pixel);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
cache_info->nexus_info[id],exception);
if (p == (const Quantum *) NULL)
return(MagickFalse);
GetPixelInfoPixel(image,p,pixel);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
assert(cache_methods != (CacheMethods *) NULL);
(void) memset(cache_methods,0,sizeof(*cache_methods));
cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
cache_methods->get_virtual_metacontent_from_handler=
GetVirtualMetacontentFromCache;
cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
cache_methods->get_authentic_metacontent_from_handler=
GetAuthenticMetacontentFromCache;
cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
cache_methods->get_one_authentic_pixel_from_handler=
GetOneAuthenticPixelFromCache;
cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
NexusInfo *magick_restrict nexus_info)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
extent;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
if (extent == 0)
return((MagickSizeType) cache_info->columns*cache_info->rows);
return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
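% A minimal usage sketch (illustrative only): the return value is NULL unless
% the cache is memory- or map-based, so callers must be prepared to fall back
% to the regular pixel accessors:
%
%     MagickSizeType
%       length;
%
%     void
%       *pixels;
%
%     pixels=GetPixelCachePixels(image,&length,exception);
%     if (pixels == (void *) NULL)
%       { /* disk or distributed cache: use GetVirtualPixels() instead */ }
%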
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
assert(length != (MagickSizeType *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*length=cache_info->length;
if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
return((void *) NULL);
return((void *) cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
return(cache_info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
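% A minimal usage sketch (illustrative only; internal callers tile their
% loops on the returned geometry for cache-friendly access):
%
%     size_t
%       height,
%       width;
%
%     GetPixelCacheTileSize(image,&width,&height);
%     /* process the image in width x height tiles */
%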
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
size_t *height)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*width=2048UL/(cache_info->number_channels*sizeof(Quantum));
if (GetImagePixelCacheType(image) == DiskCache)
*width=8192UL/(cache_info->number_channels*sizeof(Quantum));
*height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
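% A minimal usage sketch (illustrative only):
%
%     if (GetPixelCacheVirtualMethod(image) == EdgeVirtualPixelMethod)
%       { /* out-of-bounds reads replicate the nearest edge pixel */ }
%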
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding to
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% const void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
NexusInfo *magick_restrict nexus_info)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->storage_class == UndefinedClass)
return((void *) NULL);
return(nexus_info->metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding to
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content is not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% const Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
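/*
  DitherMatrix is a standard 8x8 ordered-dither (Bayer-style) matrix with
  entries in 0..63. DitherX() and DitherY() below offset an out-of-bounds
  coordinate by DitherMatrix[coordinate & 0x07]-32 and clamp the result to
  the image extent, so DitherVirtualPixelMethod samples nearby interior
  pixels in a repeatable pattern.
*/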
static ssize_t
DitherMatrix[64] =
{
0, 48, 12, 60, 3, 51, 15, 63,
32, 16, 44, 28, 35, 19, 47, 31,
8, 56, 4, 52, 11, 59, 7, 55,
40, 24, 36, 20, 43, 27, 39, 23,
2, 50, 14, 62, 1, 49, 13, 61,
34, 18, 46, 30, 33, 17, 45, 29,
10, 58, 6, 54, 9, 57, 5, 53,
42, 26, 38, 22, 41, 25, 37, 21
};
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
ssize_t
index;
index=x+DitherMatrix[x & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) columns)
return((ssize_t) columns-1L);
return(index);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
ssize_t
index;
index=y+DitherMatrix[y & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) rows)
return((ssize_t) rows-1L);
return(index);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
if (x < 0L)
return(0L);
if (x >= (ssize_t) columns)
return((ssize_t) (columns-1));
return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
if (y < 0L)
return(0L);
if (y >= (ssize_t) rows)
return((ssize_t) (rows-1));
return(y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
const size_t extent)
{
MagickModulo
modulo;
/*
Compute the remainder of dividing offset by extent. It returns not only
the quotient (the tile the offset falls in) but also the positive
remainder within that tile, such that 0 <= remainder < extent. This
method is essentially ldiv() using floored division rather than the
default truncated division.
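For example, with floored division, offset=-3 and extent=10 yield
quotient=-1 and remainder=7 (-3 = -1*10 + 7), whereas truncated division
would yield quotient=0 and remainder=-3.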
*/
modulo.quotient=offset/(ssize_t) extent;
if ((offset < 0L) && (modulo.quotient > (ssize_t) (-SSIZE_MAX)))
modulo.quotient--;
modulo.remainder=(ssize_t) (offset-(double) modulo.quotient*extent);
return(modulo);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickOffsetType
offset;
MagickSizeType
length,
number_pixels;
NexusInfo
**magick_restrict virtual_nexus;
Quantum
*magick_restrict pixels,
virtual_pixel[MaxPixelChannels];
RectangleInfo
region;
register const Quantum
*magick_restrict p;
register const void
*magick_restrict r;
register Quantum
*magick_restrict q;
register ssize_t
i,
u;
register unsigned char
*magick_restrict s;
ssize_t
v;
void
*magick_restrict virtual_metacontent;
/*
Acquire pixels.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->type == UndefinedCache)
return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
region.x=x;
region.y=y;
region.width=columns;
region.height=rows;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
((image->channels & WriteMaskChannel) != 0) ||
((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
nexus_info,exception);
if (pixels == (Quantum *) NULL)
return((const Quantum *) NULL);
q=pixels;
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
nexus_info->region.width-1L;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
(y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
{
MagickBooleanType
status;
/*
Pixel request is inside cache extents.
*/
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(q);
status=ReadPixelCachePixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const Quantum *) NULL);
if (cache_info->metacontent_extent != 0)
{
status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const Quantum *) NULL);
}
return(q);
}
/*
Pixel request is outside cache extents.
*/
s=(unsigned char *) nexus_info->metacontent;
virtual_nexus=AcquirePixelCacheNexus(1);
(void) memset(virtual_pixel,0,cache_info->number_channels*
sizeof(*virtual_pixel));
virtual_metacontent=(void *) NULL;
switch (virtual_pixel_method)
{
case BackgroundVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case EdgeVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
case HorizontalTileVirtualPixelMethod:
case VerticalTileVirtualPixelMethod:
{
if (cache_info->metacontent_extent != 0)
{
/*
Acquire a metacontent buffer.
*/
virtual_metacontent=(void *) AcquireQuantumMemory(1,
cache_info->metacontent_extent);
if (virtual_metacontent == (void *) NULL)
{
virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
(void) ThrowMagickException(exception,GetMagickModule(),
CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
return((const Quantum *) NULL);
}
(void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
}
switch (virtual_pixel_method)
{
case BlackVirtualPixelMethod:
{
for (i=0; i < (ssize_t) cache_info->number_channels; i++)
SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
break;
}
case GrayVirtualPixelMethod:
{
for (i=0; i < (ssize_t) cache_info->number_channels; i++)
SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
virtual_pixel);
SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
break;
}
case TransparentVirtualPixelMethod:
{
for (i=0; i < (ssize_t) cache_info->number_channels; i++)
SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
break;
}
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
for (i=0; i < (ssize_t) cache_info->number_channels; i++)
SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
break;
}
default:
{
SetPixelRed(image,ClampToQuantum(image->background_color.red),
virtual_pixel);
SetPixelGreen(image,ClampToQuantum(image->background_color.green),
virtual_pixel);
SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
virtual_pixel);
SetPixelBlack(image,ClampToQuantum(image->background_color.black),
virtual_pixel);
SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
virtual_pixel);
break;
}
}
break;
}
default:
break;
}
for (v=0; v < (ssize_t) rows; v++)
{
ssize_t
y_offset;
y_offset=y+v;
if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
(virtual_pixel_method == UndefinedVirtualPixelMethod))
y_offset=EdgeY(y_offset,cache_info->rows);
for (u=0; u < (ssize_t) columns; u+=length)
{
ssize_t
x_offset;
x_offset=x+u;
length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
(length == 0))
{
MagickModulo
x_modulo,
y_modulo;
/*
Transfer a single pixel.
*/
length=(MagickSizeType) 1;
switch (virtual_pixel_method)
{
case EdgeVirtualPixelMethod:
default:
{
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),
EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case RandomVirtualPixelMethod:
{
if (cache_info->random_info == (RandomInfo *) NULL)
cache_info->random_info=AcquireRandomInfo();
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
RandomX(cache_info->random_info,cache_info->columns),
RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
*virtual_nexus,exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case DitherVirtualPixelMethod:
{
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
DitherX(x_offset,cache_info->columns),
DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case TileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case MirrorVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
if ((x_modulo.quotient & 0x01) == 1L)
x_modulo.remainder=(ssize_t) cache_info->columns-
x_modulo.remainder-1L;
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if ((y_modulo.quotient & 0x01) == 1L)
y_modulo.remainder=(ssize_t) cache_info->rows-
y_modulo.remainder-1L;
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case HorizontalTileEdgeVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
*virtual_nexus,exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case VerticalTileEdgeVirtualPixelMethod:
{
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
*virtual_nexus,exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case BackgroundVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
p=virtual_pixel;
r=virtual_metacontent;
break;
}
case CheckerTileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
{
p=virtual_pixel;
r=virtual_metacontent;
break;
}
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case HorizontalTileVirtualPixelMethod:
{
if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
{
p=virtual_pixel;
r=virtual_metacontent;
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
case VerticalTileVirtualPixelMethod:
{
if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
{
p=virtual_pixel;
r=virtual_metacontent;
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
break;
}
}
if (p == (const Quantum *) NULL)
break;
(void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
sizeof(*p)));
q+=cache_info->number_channels;
if ((s != (void *) NULL) && (r != (const void *) NULL))
{
(void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
s+=cache_info->metacontent_extent;
}
continue;
}
/*
Transfer a run of pixels.
*/
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
(size_t) length,1UL,*virtual_nexus,exception);
if (p == (const Quantum *) NULL)
break;
r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
(void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
sizeof(*p)));
q+=cache_info->number_channels*length;
if ((r != (void *) NULL) && (s != (const void *) NULL))
{
(void) memcpy(s,r,(size_t) length);
s+=length*cache_info->metacontent_extent;
}
}
if (u < (ssize_t) columns)
break;
}
/*
Free resources.
*/
if (virtual_metacontent != (void *) NULL)
virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
if (v < (ssize_t) rows)
return((const Quantum *) NULL);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
cache_info->nexus_info[id],exception);
return(p);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetVirtualMetacontent() after invoking GetVirtualPixels() to
% access the meta-content (of type void) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
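% A typical row-at-a-time read loop looks like this (illustrative sketch;
% `y' iterates over image->rows and error handling is elided):
%
%     const Quantum
%       *p;
%
%     ssize_t
%       x;
%
%     p=GetVirtualPixels(image,0,y,image->columns,1,exception);
%     if (p == (const Quantum *) NULL)
%       break;
%     for (x=0; x < (ssize_t) image->columns; x++)
%     {
%       /* read channel values, e.g. GetPixelRed(image,p) */
%       p+=GetPixelChannels(image);
%     }
%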
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixel_handler !=
(GetVirtualPixelHandler) NULL)
return(cache_info->methods.get_virtual_pixel_handler(image,
GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
assert(id < (int) cache_info->number_threads);
p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
columns,rows,cache_info->nexus_info[id],exception);
return(p);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% const Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
NexusInfo *magick_restrict nexus_info)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->storage_class == UndefinedClass)
return((Quantum *) NULL);
return((const Quantum *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
double
mask_alpha;
if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
return(p);
mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
mask_alpha=PerceptibleReciprocal(mask_alpha);
return(ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
number_pixels;
NexusInfo
**magick_restrict image_nexus;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
n;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->channels & CompositeMaskChannel) == 0)
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,image_nexus[0],
exception);
q=nexus_info->pixels;
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (n=0; n < (ssize_t) number_pixels; n++)
{
double
mask_alpha;
register ssize_t
i;
if (p == (Quantum *) NULL)
break;
mask_alpha=(double) GetPixelCompositeMask(image,p);
for (i=0; i < (ssize_t) image->number_channels; i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],
(MagickRealType) GetPixelAlpha(image,q));
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
if (n < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
const MapMode mode)
{
int
file;
/*
Open pixel cache on disk.
*/
if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
return(MagickTrue); /* cache already open and in the proper mode */
if (*cache_info->cache_filename == '\0')
file=AcquireUniqueFileResource(cache_info->cache_filename);
else
switch (mode)
{
case ReadMode:
{
file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
break;
}
case WriteMode:
{
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
O_BINARY | O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
break;
}
case IOMode:
default:
{
file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
break;
}
}
if (file == -1)
return(MagickFalse);
(void) AcquireMagickResource(FileResource,1);
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->file=file;
cache_info->disk_mode=mode;
return(MagickTrue);
}
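/*
  WritePixelCacheRegion() writes `length' bytes of `buffer' to the pixel
  cache file at `offset', restarting after EINTR and short writes; it
  returns the number of bytes actually written, or -1 if the initial seek
  fails when pwrite() is unavailable.
*/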
static inline MagickOffsetType WritePixelCacheRegion(
const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
if (lseek(cache_info->file,offset,SEEK_SET) < 0)
return((MagickOffsetType) -1);
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX),offset+i);
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
return(i);
}
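/*
  SetPixelCacheExtent() grows the on-disk cache file to `length' bytes by
  seeking to the end and, if needed, writing a single byte at the new end
  (optionally preallocating the span with posix_fallocate() when
  synchronized I/O is requested), then rewinds the file descriptor.
*/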
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
CacheInfo
*magick_restrict cache_info;
MagickOffsetType
count,
extent,
offset;
cache_info=(CacheInfo *) image->cache;
if (image->debug != MagickFalse)
{
char
format[MagickPathExtent],
message[MagickPathExtent];
(void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
(void) FormatLocaleString(message,MagickPathExtent,
"extend %s (%s[%d], disk, %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
count=(MagickOffsetType) 1;
else
{
extent=(MagickOffsetType) length-1;
count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
"");
if (count != 1)
return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (cache_info->synchronize != MagickFalse)
if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
return(MagickFalse);
#endif
}
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
if (offset < 0)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info,
source_info;
char
format[MagickPathExtent],
message[MagickPathExtent];
const char
*hosts,
*type;
MagickBooleanType
status;
MagickSizeType
length,
number_pixels;
size_t
columns,
packet_size;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (cache_anonymous_memory < 0)
{
char
*value;
/*
Does the security policy require anonymous mapping for pixel cache?
*/
cache_anonymous_memory=0;
value=GetPolicyValue("pixel-cache-memory");
if (value == (char *) NULL)
value=GetPolicyValue("cache:memory-map");
if (LocaleCompare(value,"anonymous") == 0)
{
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
cache_anonymous_memory=1;
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
"'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
}
value=DestroyString(value);
}
if ((image->columns == 0) || (image->rows == 0))
ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
(AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
image->filename);
length=GetImageListLength(image);
if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
image->filename);
source_info=(*cache_info);
source_info.file=(-1);
(void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
image->filename,(double) image->scene);
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->alpha_trait=image->alpha_trait;
cache_info->channels=image->channels;
cache_info->rows=image->rows;
cache_info->columns=image->columns;
InitializePixelChannelMap(image);
cache_info->number_channels=GetPixelChannels(image);
(void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
sizeof(*image->channel_map));
cache_info->metacontent_extent=image->metacontent_extent;
cache_info->mode=mode;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
packet_size=cache_info->number_channels*sizeof(Quantum);
if (image->metacontent_extent != 0)
packet_size+=cache_info->metacontent_extent;
length=number_pixels*packet_size;
columns=(size_t) (length/cache_info->rows/packet_size);
if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
((ssize_t) cache_info->rows < 0))
ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
image->filename);
cache_info->length=length;
if (image->ping != MagickFalse)
{
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->type=PingCache;
return(MagickTrue);
}
status=AcquireMagickResource(AreaResource,(MagickSizeType)
cache_info->columns*cache_info->rows);
if (cache_info->mode == PersistMode)
status=MagickFalse;
length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
cache_info->metacontent_extent);
if ((status != MagickFalse) &&
(length == (MagickSizeType) ((size_t) length)) &&
((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache)))
{
status=AcquireMagickResource(MemoryResource,cache_info->length);
if (status != MagickFalse)
{
status=MagickTrue;
if (cache_anonymous_memory <= 0)
{
cache_info->mapped=MagickFalse;
cache_info->pixels=(Quantum *) MagickAssumeAligned(
AcquireAlignedMemory(1,(size_t) cache_info->length));
}
else
{
cache_info->mapped=MagickTrue;
cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
cache_info->length);
}
if (cache_info->pixels == (Quantum *) NULL)
{
cache_info->mapped=source_info.mapped;
cache_info->pixels=source_info.pixels;
}
else
{
/*
Create memory pixel cache.
*/
cache_info->type=MemoryCache;
cache_info->metacontent=(void *) NULL;
if (cache_info->metacontent_extent != 0)
cache_info->metacontent=(void *) (cache_info->pixels+
cache_info->number_channels*number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->mapped != MagickFalse ?
"Anonymous" : "Heap",type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
return(status == 0 ? MagickFalse : MagickTrue);
}
}
}
status=AcquireMagickResource(DiskResource,cache_info->length);
hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
exception);
if ((status == MagickFalse) && (hosts != (const char *) NULL))
{
DistributeCacheInfo
*server_info;
/*
Distribute the pixel cache to a remote server.
*/
server_info=AcquireDistributeCacheInfo(exception);
if (server_info != (DistributeCacheInfo *) NULL)
{
status=OpenDistributePixelCache(server_info,image);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
GetDistributeCacheHostname(server_info));
server_info=DestroyDistributeCacheInfo(server_info);
}
else
{
/*
Create a distributed pixel cache.
*/
status=MagickTrue;
cache_info->type=DistributedCache;
cache_info->server_info=server_info;
(void) FormatLocaleString(cache_info->cache_filename,
MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
(DistributeCacheInfo *) cache_info->server_info),
GetDistributeCachePort((DistributeCacheInfo *)
cache_info->server_info));
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
GetDistributeCacheFile((DistributeCacheInfo *)
cache_info->server_info),type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
return(status == 0 ? MagickFalse : MagickTrue);
}
}
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
/*
Create pixel cache on disk.
*/
if (status == MagickFalse)
{
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
(cache_info->mode != PersistMode))
{
(void) ClosePixelCacheOnDisk(cache_info);
*cache_info->cache_filename='\0';
}
if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
image->filename);
return(MagickFalse);
}
status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
cache_info->length);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToExtendCache",
image->filename);
return(MagickFalse);
}
length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
cache_info->metacontent_extent);
if (length != (MagickSizeType) ((size_t) length))
cache_info->type=DiskCache;
else
{
status=AcquireMagickResource(MapResource,cache_info->length);
if (status == MagickFalse)
cache_info->type=DiskCache;
else
if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
{
cache_info->type=DiskCache;
RelinquishMagickResource(MapResource,cache_info->length);
}
else
{
cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
cache_info->offset,(size_t) cache_info->length);
if (cache_info->pixels == (Quantum *) NULL)
{
cache_info->type=DiskCache;
cache_info->mapped=source_info.mapped;
cache_info->pixels=source_info.pixels;
RelinquishMagickResource(MapResource,cache_info->length);
}
else
{
/*
Create file-backed memory-mapped pixel cache.
*/
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->type=MapCache;
cache_info->mapped=MagickTrue;
cache_info->metacontent=(void *) NULL;
if (cache_info->metacontent_extent != 0)
cache_info->metacontent=(void *) (cache_info->pixels+
cache_info->number_channels*number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
cache_info->file,type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
return(status == 0 ? MagickFalse : MagickTrue);
}
}
}
status=MagickTrue;
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,type,(double)
cache_info->columns,(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero attaches to an existing persistent
% pixel cache; zero initializes the persistent pixel cache from the
% image pixels.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
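% A minimal usage sketch (illustrative only; the filename is hypothetical and
% `offset' tracks where the next cache would land in the file):
%
%     MagickBooleanType
%       status;
%
%     MagickOffsetType
%       offset = 0;
%
%     status=PersistPixelCache(image,"image.cache",MagickFalse,&offset,
%       exception);
%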
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info,
*magick_restrict clone_info;
MagickBooleanType
status;
ssize_t
page_size;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (void *) NULL);
assert(filename != (const char *) NULL);
assert(offset != (MagickOffsetType *) NULL);
page_size=GetMagickPageSize();
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
if (attach != MagickFalse)
{
/*
Attach existing persistent pixel cache.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"attach persistent cache");
(void) CopyMagickString(cache_info->cache_filename,filename,
MagickPathExtent);
cache_info->type=DiskCache;
cache_info->offset=(*offset);
if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
return(MagickFalse);
*offset+=cache_info->length+page_size-(cache_info->length % page_size);
return(SyncImagePixelCache(image,exception));
}
/*
Clone persistent pixel cache.
*/
status=AcquireMagickResource(DiskResource,cache_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
clone_info=(CacheInfo *) ClonePixelCache(cache_info);
clone_info->type=DiskCache;
(void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
clone_info->file=(-1);
clone_info->storage_class=cache_info->storage_class;
clone_info->colorspace=cache_info->colorspace;
clone_info->alpha_trait=cache_info->alpha_trait;
clone_info->channels=cache_info->channels;
clone_info->columns=cache_info->columns;
clone_info->rows=cache_info->rows;
clone_info->number_channels=cache_info->number_channels;
clone_info->metacontent_extent=cache_info->metacontent_extent;
clone_info->mode=PersistMode;
clone_info->length=cache_info->length;
(void) memcpy(clone_info->channel_map,cache_info->channel_map,
MaxPixelChannels*sizeof(*cache_info->channel_map));
clone_info->offset=(*offset);
status=ClonePixelCacheRepository(clone_info,cache_info,exception);
*offset+=cache_info->length+page_size-(cache_info->length % page_size);
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently synced back to the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% region is successfully allocated, otherwise NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickOffsetType
offset;
MagickSizeType
number_pixels;
Quantum
*magick_restrict pixels;
RectangleInfo
region;
/*
Validate pixel cache geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
if (cache_info == (Cache) NULL)
return((Quantum *) NULL);
assert(cache_info->signature == MagickCoreSignature);
if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
(y < 0) || (x >= (ssize_t) cache_info->columns) ||
(y >= (ssize_t) cache_info->rows))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"PixelsAreNotAuthentic","`%s'",image->filename);
return((Quantum *) NULL);
}
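  /*
    Reject any region whose last pixel would fall outside the cache extent;
    the offset arithmetic below must stay within columns*rows.
  */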
offset=(MagickOffsetType) y*cache_info->columns+x;
if (offset < 0)
return((Quantum *) NULL);
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
if ((MagickSizeType) offset >= number_pixels)
return((Quantum *) NULL);
/*
Return pixel cache.
*/
region.x=x;
region.y=y;
region.width=columns;
region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
((image->channels & WriteMaskChannel) != 0) ||
((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
nexus_info,exception);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently synced back to the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% region is successfully allocated, otherwise NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
cache_info->nexus_info[id],exception);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully allocated, a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking QueueAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
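% A minimal usage sketch (an illustrative fragment; `image' and `exception'
% are assumed valid and the enclosing routine returns MagickBooleanType):
%
%      Quantum
%        *q;
%
%      ssize_t
%        x;
%
%      q=QueueAuthenticPixels(image,0,0,image->columns,1,exception);
%      if (q == (Quantum *) NULL)
%        return(MagickFalse);
%      for (x=0; x < (ssize_t) image->columns; x++)
%      {
%        SetPixelRed(image,QuantumRange,q);
%        q+=GetPixelChannels(image);
%      }
%      if (SyncAuthenticPixels(image,exception) == MagickFalse)
%        return(MagickFalse);
%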
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict pixels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.queue_authentic_pixels_handler !=
(QueueAuthenticPixelsHandler) NULL)
{
pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y,
columns,rows,exception);
return(pixels);
}
assert(id < (int) cache_info->number_threads);
pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
cache_info->nexus_info[id],exception);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
if (lseek(cache_info->file,offset,SEEK_SET) < 0)
return((MagickOffsetType) -1);
#endif
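  /*
    Transfer the region in chunks: pread() (or read() after an explicit
    seek) may return fewer bytes than requested, so loop until the full
    length has been read, retrying on EINTR and giving up on any other
    error.
  */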
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX),offset+i);
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register ssize_t
y;
register unsigned char
*magick_restrict q;
size_t
rows;
if (cache_info->metacontent_extent == 0)
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*
cache_info->metacontent_extent;
extent=length*nexus_info->region.height;
rows=nexus_info->region.height;
y=0;
q=(unsigned char *) nexus_info->metacontent;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register unsigned char
*magick_restrict p;
/*
Read meta-content from memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
p=(unsigned char *) cache_info->metacontent+offset*
cache_info->metacontent_extent;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=cache_info->metacontent_extent*cache_info->columns;
q+=cache_info->metacontent_extent*nexus_info->region.width;
}
break;
}
case DiskCache:
{
/*
Read meta content from disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
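      /*
        On disk the metacontent plane follows all pixel data, so the file
        offset below is biased by columns*rows*number_channels*
        sizeof(Quantum).
      */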
extent=(MagickSizeType) cache_info->columns*cache_info->rows;
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
cache_info->number_channels*sizeof(Quantum)+offset*
cache_info->metacontent_extent,length,(unsigned char *) q);
if (count != (MagickOffsetType) length)
break;
offset+=cache_info->columns;
q+=cache_info->metacontent_extent*nexus_info->region.width;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Read metacontent from distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
if (count != (MagickOffsetType) length)
break;
q+=cache_info->metacontent_extent*nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
number_channels,
rows;
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
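  /*
    Compute the region offset and per-row byte length, failing closed if
    any intermediate product overflows its integer type.
  */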
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
return(MagickFalse);
offset+=nexus_info->region.x;
number_channels=cache_info->number_channels;
length=(MagickSizeType) number_channels*nexus_info->region.width*
sizeof(Quantum);
if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
return(MagickFalse);
rows=nexus_info->region.height;
extent=length*rows;
if ((extent == 0) || ((extent/length) != rows))
return(MagickFalse);
y=0;
q=nexus_info->pixels;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register Quantum
*magick_restrict p;
/*
Read pixels from memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
p=cache_info->pixels+cache_info->number_channels*offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=cache_info->number_channels*cache_info->columns;
q+=cache_info->number_channels*nexus_info->region.width;
}
break;
}
case DiskCache:
{
/*
Read pixels from disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
if (count != (MagickOffsetType) length)
break;
offset+=cache_info->columns;
q+=cache_info->number_channels*nexus_info->region.width;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Read pixels from distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
if (count != (MagickOffsetType) length)
break;
q+=cache_info->number_channels*nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
% A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
CacheInfo
*magick_restrict cache_info;
  assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
LockSemaphoreInfo(cache_info->semaphore);
cache_info->reference_count++;
UnlockSemaphoreInfo(cache_info->semaphore);
return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
cache_info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
%      void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
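% A hypothetical sketch (`MyQueueHandler' is an assumption, not part of this
% file): zero the structure so only the handler you set is installed, since
% NULL members below are left untouched.
%
%      CacheMethods
%        cache_methods;
%
%      (void) memset(&cache_methods,0,sizeof(cache_methods));
%      cache_methods.queue_authentic_pixels_handler=MyQueueHandler;
%      SetPixelCacheMethods(image->cache,&cache_methods);
%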
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
CacheInfo
*magick_restrict cache_info;
GetOneAuthenticPixelFromHandler
get_one_authentic_pixel_from_handler;
GetOneVirtualPixelFromHandler
get_one_virtual_pixel_from_handler;
/*
Set cache pixel methods.
*/
assert(cache != (Cache) NULL);
assert(cache_methods != (CacheMethods *) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
cache_info->methods.get_virtual_pixel_handler=
cache_methods->get_virtual_pixel_handler;
if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
cache_info->methods.destroy_pixel_handler=
cache_methods->destroy_pixel_handler;
if (cache_methods->get_virtual_metacontent_from_handler !=
(GetVirtualMetacontentFromHandler) NULL)
cache_info->methods.get_virtual_metacontent_from_handler=
cache_methods->get_virtual_metacontent_from_handler;
if (cache_methods->get_authentic_pixels_handler !=
(GetAuthenticPixelsHandler) NULL)
cache_info->methods.get_authentic_pixels_handler=
cache_methods->get_authentic_pixels_handler;
if (cache_methods->queue_authentic_pixels_handler !=
(QueueAuthenticPixelsHandler) NULL)
cache_info->methods.queue_authentic_pixels_handler=
cache_methods->queue_authentic_pixels_handler;
if (cache_methods->sync_authentic_pixels_handler !=
(SyncAuthenticPixelsHandler) NULL)
cache_info->methods.sync_authentic_pixels_handler=
cache_methods->sync_authentic_pixels_handler;
if (cache_methods->get_authentic_pixels_from_handler !=
(GetAuthenticPixelsFromHandler) NULL)
cache_info->methods.get_authentic_pixels_from_handler=
cache_methods->get_authentic_pixels_from_handler;
if (cache_methods->get_authentic_metacontent_from_handler !=
(GetAuthenticMetacontentFromHandler) NULL)
cache_info->methods.get_authentic_metacontent_from_handler=
cache_methods->get_authentic_metacontent_from_handler;
get_one_virtual_pixel_from_handler=
cache_info->methods.get_one_virtual_pixel_from_handler;
if (get_one_virtual_pixel_from_handler !=
(GetOneVirtualPixelFromHandler) NULL)
cache_info->methods.get_one_virtual_pixel_from_handler=
cache_methods->get_one_virtual_pixel_from_handler;
get_one_authentic_pixel_from_handler=
cache_methods->get_one_authentic_pixel_from_handler;
if (get_one_authentic_pixel_from_handler !=
(GetOneAuthenticPixelFromHandler) NULL)
cache_info->methods.get_one_authentic_pixel_from_handler=
cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
% const MapMode mode,const RectangleInfo *region,
% const MagickBooleanType buffered,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o region: A pointer to the RectangleInfo structure that defines the
% region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
if (length != (MagickSizeType) ((size_t) length))
return(MagickFalse);
nexus_info->length=0;
nexus_info->mapped=MagickFalse;
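  /*
    Stage the nexus in heap memory by default; fall back to an anonymous
    memory map when cache_anonymous_memory is set.
  */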
if (cache_anonymous_memory <= 0)
{
nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
(size_t) length));
if (nexus_info->cache != (Quantum *) NULL)
(void) memset(nexus_info->cache,0,(size_t) length);
}
else
{
nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
if (nexus_info->cache != (Quantum *) NULL)
nexus_info->mapped=MagickTrue;
}
if (nexus_info->cache == (Quantum *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
cache_info->filename);
return(MagickFalse);
}
nexus_info->length=length;
return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
const MapMode mode)
{
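  /*
    Prefetch the first cache line of the nexus pixels (skipped for tiny
    allocations): as read data for ReadMode, as write data otherwise.
  */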
if (nexus_info->length < CACHE_LINE_SIZE)
return;
if (mode == ReadMode)
{
MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
0,1);
return;
}
MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1);
}
static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
const MapMode mode,const RectangleInfo *region,
const MagickBooleanType buffered,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
MagickBooleanType
status;
MagickSizeType
length,
number_pixels;
assert(cache_info != (const CacheInfo *) NULL);
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->type == UndefinedCache)
return((Quantum *) NULL);
(void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
if ((region->width == 0) || (region->height == 0))
return((Quantum *) NULL);
if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
(buffered == MagickFalse))
{
ssize_t
x,
y;
x=(ssize_t) region->width+region->x-1;
y=(ssize_t) region->height+region->y-1;
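      /*
        Take the in-place fast path only when the request is a stack of
        full-width rows or a single row lying entirely within the cache.
      */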
if (((region->x >= 0) &&
(region->y >= 0) && (y < (ssize_t) cache_info->rows)) &&
(((region->x == 0) && (region->width == cache_info->columns)) ||
((region->height == 1) && (x < (ssize_t) cache_info->columns))))
{
MagickOffsetType
offset;
/*
Pixels are accessed directly from memory.
*/
offset=(MagickOffsetType) region->y*cache_info->columns+region->x;
nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
offset;
nexus_info->metacontent=(void *) NULL;
if (cache_info->metacontent_extent != 0)
nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
offset*cache_info->metacontent_extent;
nexus_info->region=(*region);
nexus_info->authentic_pixel_cache=MagickTrue;
PrefetchPixelCacheNexusPixels(nexus_info,mode);
return(nexus_info->pixels);
}
}
/*
Pixels are stored in a staging region until they are synced to the cache.
*/
number_pixels=(MagickSizeType) region->width*region->height;
length=MagickMax(number_pixels,cache_info->columns)*
cache_info->number_channels*sizeof(*nexus_info->pixels);
if (cache_info->metacontent_extent != 0)
length+=number_pixels*cache_info->metacontent_extent;
status=MagickTrue;
if (nexus_info->cache == (Quantum *) NULL)
status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
else
if (nexus_info->length < length)
{
RelinquishCacheNexusPixels(nexus_info);
status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
}
if (status == MagickFalse)
return((Quantum *) NULL);
nexus_info->pixels=nexus_info->cache;
nexus_info->metacontent=(void *) NULL;
if (cache_info->metacontent_extent != 0)
nexus_info->metacontent=(void *) (nexus_info->pixels+
cache_info->number_channels*number_pixels);
nexus_info->region=(*region);
nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
MagickTrue : MagickFalse;
PrefetchPixelCacheNexusPixels(nexus_info,mode);
return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
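% A minimal usage sketch (illustrative; `image' and `exception' are assumed
% to be valid). The previous method is returned so it can be restored:
%
%      VirtualPixelMethod
%        previous;
%
%      previous=SetPixelCacheVirtualMethod(image,
%        TransparentVirtualPixelMethod,exception);
%      /* ... render with transparent virtual pixels ... */
%      (void) SetPixelCacheVirtualMethod(image,previous,exception);
%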
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
CacheView
*magick_restrict image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
image->alpha_trait=BlendPixelTrait;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,alpha,q);
q+=GetPixelChannels(image);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
image_view=DestroyCacheView(image_view);
return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
VirtualPixelMethod
method;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
method=cache_info->virtual_pixel_method;
cache_info->virtual_pixel_method=virtual_pixel_method;
if ((image->columns != 0) && (image->rows != 0))
switch (virtual_pixel_method)
{
case BackgroundVirtualPixelMethod:
{
if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
(image->alpha_trait == UndefinedPixelTrait))
(void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
(IsGrayColorspace(image->colorspace) != MagickFalse))
(void) SetImageColorspace(image,sRGBColorspace,exception);
break;
}
case TransparentVirtualPixelMethod:
{
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
break;
}
default:
break;
}
return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
assert(cache_info != (CacheInfo *) NULL);
assert(cache_info->signature == MagickCoreSignature);
if ((cache_info->type != MemoryCache) ||
(cache_info->opencl == (MagickCLCacheInfo) NULL))
return;
/*
Ensure single threaded access to OpenCL environment.
*/
LockSemaphoreInfo(cache_info->semaphore);
cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
cache_info=(CacheInfo *) image->cache;
CopyOpenCLBuffer(cache_info);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickBooleanType
status;
/*
Transfer pixels to the cache.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->cache == (Cache) NULL)
ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->type == UndefinedCache)
return(MagickFalse);
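  /*
    Honor any write or composite mask before committing the nexus, unless
    the mask channels themselves are being updated.
  */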
if (image->mask_trait != UpdatePixelTrait)
{
if (((image->channels & WriteMaskChannel) != 0) &&
(ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
return(MagickFalse);
if (((image->channels & CompositeMaskChannel) != 0) &&
(MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
return(MagickFalse);
}
if (nexus_info->authentic_pixel_cache != MagickFalse)
{
image->taint=MagickTrue;
return(MagickTrue);
}
assert(cache_info->signature == MagickCoreSignature);
status=WritePixelCachePixels(cache_info,nexus_info,exception);
if ((cache_info->metacontent_extent != 0) &&
(WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
return(MagickFalse);
if (status != MagickFalse)
image->taint=MagickTrue;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   S y n c A u t h e n t i c P i x e l s C a c h e                           %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL)
{
status=cache_info->methods.sync_authentic_pixels_handler(image,
exception);
return(status);
}
assert(id < (int) cache_info->number_threads);
status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(exception != (ExceptionInfo *) NULL);
cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register const unsigned char
*magick_restrict p;
register ssize_t
y;
size_t
rows;
if (cache_info->metacontent_extent == 0)
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*
cache_info->metacontent_extent;
extent=(MagickSizeType) length*nexus_info->region.height;
rows=nexus_info->region.height;
y=0;
p=(unsigned char *) nexus_info->metacontent;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register unsigned char
*magick_restrict q;
/*
Write associated pixels to memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
q=(unsigned char *) cache_info->metacontent+offset*
cache_info->metacontent_extent;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=nexus_info->region.width*cache_info->metacontent_extent;
q+=cache_info->columns*cache_info->metacontent_extent;
}
break;
}
case DiskCache:
{
/*
Write associated pixels to disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
extent=(MagickSizeType) cache_info->columns*cache_info->rows;
for (y=0; y < (ssize_t) rows; y++)
{
count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
cache_info->number_channels*sizeof(Quantum)+offset*
cache_info->metacontent_extent,length,(const unsigned char *) p);
if (count != (MagickOffsetType) length)
break;
p+=cache_info->metacontent_extent*nexus_info->region.width;
offset+=cache_info->columns;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Write metacontent to distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
if (count != (MagickOffsetType) length)
break;
p+=cache_info->metacontent_extent*nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   W r i t e P i x e l C a c h e P i x e l s                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register const Quantum
*magick_restrict p;
register ssize_t
y;
size_t
rows;
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
sizeof(Quantum);
extent=length*nexus_info->region.height;
rows=nexus_info->region.height;
y=0;
p=nexus_info->pixels;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register Quantum
*magick_restrict q;
/*
Write pixels to memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
q=cache_info->pixels+cache_info->number_channels*offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=cache_info->number_channels*nexus_info->region.width;
q+=cache_info->number_channels*cache_info->columns;
}
break;
}
case DiskCache:
{
/*
Write pixels to disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
p);
if (count != (MagickOffsetType) length)
break;
p+=cache_info->number_channels*nexus_info->region.width;
offset+=cache_info->columns;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Write pixels to distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
if (count != (MagickOffsetType) length)
break;
p+=cache_info->number_channels*nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
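  // Stmt subclasses are instead allocated with placement new through the
  // ASTContext allocator, so the traps above catch accidental plain
  // allocations.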
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
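  // Note: each *Bitfields class below begins with an anonymous bitfield of
  // width NumStmtBits so that its members never overlap the statement class
  // stored in StmtBitfields when these classes share storage in a union.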
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
    /// True if the null statement was preceded by an empty macro, e.g.:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is tail-allocated.
unsigned ResultKind : 2;
/// The kind of Result as defined by APValue::Kind.
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, true if the tail-allocated integer is
/// unsigned.
unsigned IsUnsigned : 1;
    /// When ResultKind == RSK_Int64, the BitWidth of the tail-allocated
/// integer. 7 bits because it is the minimal number of bits to represent a
/// value from 0 to 64 (the size of the tail-allocated integer).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
/// tail-allocated APValue.
unsigned HasCleanup : 1;
/// True if this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated tokens this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArrayOrMatrixSubscriptExprBitfields {
friend class ArraySubscriptExpr;
friend class MatrixSubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// True if the call expression has some floating-point features.
unsigned HasFPFeatures : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 3 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
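// Editor's note (illustrative, not part of the original header): the
// anonymous "24 - 3 - NumExprBits" pad above pushes OffsetToTrailingObjects
// to bits 24..31, so it sits on a byte boundary and the whole bitfield
// occupies exactly 32 bits, which NumCallExprBits records for the
// call-expression subclasses below.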
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// True if the cast expression has some floating-point features.
unsigned HasFPFeatures : 1;
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// e.g. __builtin_LINE, __builtin_FUNCTION, etc.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// values of OverloadedOperatorKind.
unsigned OperatorKind : 6;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait. According to [implimits]
/// 8 bits would be enough, but we require (and test for) at least 16 bits
/// to mirror FunctionType.
unsigned NumArgs;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
class LambdaExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class LambdaExpr;
unsigned : NumExprBits;
/// The default capture kind, which is a value of type
/// LambdaCaptureDefault.
unsigned CaptureDefault : 2;
/// Whether this lambda had an explicit parameter list vs. an
/// implicit (and empty) parameter list.
unsigned ExplicitParams : 1;
/// Whether this lambda had the result type explicitly specified.
unsigned ExplicitResultType : 1;
/// The number of captures.
unsigned NumCaptures : 16;
};
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// GNU Extensions.
StmtExprBitfields StmtExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
LambdaExprBitfields LambdaExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
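// Editor's example (a minimal sketch of the intended usage; "Ctx" and "Loc"
// are hypothetical names): statements are created through the ASTContext
// placement form declared above, e.g.
//   Stmt *S = new (Ctx) NullStmt(Loc);
// so their storage is owned by the ASTContext allocator rather than the heap.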
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
/// The likelihood of a branch being taken.
enum Likelihood {
LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute.
LH_None, ///< No attribute set or branches of the IfStmt have
///< the same attribute.
LH_Likely ///< Branch has the [[likely]] attribute.
};
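// Editor's example (illustrative source, not part of the original header):
// for "if (c) [[likely]] f(); else [[unlikely]] g();" the then branch is
// classified LH_Likely and the else branch LH_Unlikely; two branches carrying
// the same attribute yield LH_None.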
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
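// Editor's example (a sketch; "Args" and "NumArgs" are hypothetical): given a
// "Stmt **Args" array known to hold only expressions, ExprIterator hands back
// each element already cast:
//   for (ExprIterator I(Args), E(Args + NumArgs); I != E; ++I)
//     Expr *Arg = *I; // operator* applies cast_or_null<Expr>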
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
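// Editor's example (illustrative usage; "S" is a hypothetical Stmt*): the
// stored class tag drives LLVM-style RTTI through each subclass's classof(),
// e.g.
//   if (const auto *IS = dyn_cast<IfStmt>(S))
//     const Expr *Cond = IS->getCond();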
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// \returns the likelihood of a set of attributes.
static Likelihood getLikelihood(ArrayRef<const Attr *> Attrs);
/// \returns the likelihood of a statement.
static Likelihood getLikelihood(const Stmt *S);
/// \returns the likelihood attribute of a statement.
static const Attr *getLikelihoodAttr(const Stmt *S);
/// \returns the likelihood of the 'then' branch of an 'if' statement. The
/// 'else' branch is required to determine whether both branches specify the
/// same likelihood, which affects the result.
static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else);
/// \returns whether the likelihood of the branches of an if statement are
/// conflicting. When the first element is \c true there's a conflict and
/// the Attr's are the conflicting attributes of the Then and Else Stmt.
static std::tuple<bool, const Attr *, const Attr *>
determineLikelihoodConflict(const Stmt *Then, const Stmt *Else);
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(raw_ostream &OS, const ASTContext &Context) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
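// Editor's example (a sketch; "S" and "Visit" are hypothetical): children()
// yields the direct sub-statements, and an entry may be null (e.g. an absent
// sub-statement stored in a mandatory slot), so guard before use:
//   for (const Stmt *Child : S->children())
//     if (Child)
//       Visit(Child);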
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
DeclGroupRef DG;
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
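// Editor's example (illustrative; "DS" and "HandleVar" are hypothetical):
//   for (const Decl *D : DS->decls())
//     if (const auto *VD = dyn_cast<VarDecl>(D))
//       HandleVar(VD);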
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
// ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
for (auto *B : llvm::reverse(body())) {
if (!isa<NullStmt>(B))
return B;
}
return body_back();
}
const Stmt *getStmtExprResult() const {
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which are optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allows ranges in case statements of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
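// Editor's illustration (derived from the offsets below, not part of the
// original header): for the GNU range "case 1 ... 5: stmt;" the trailing
// objects are Stmt*[3] = { LHS(1), RHS(5), substmt } plus the SourceLocation
// of the "..."; for a plain "case 1: stmt;" they are
// Stmt*[2] = { LHS(1), substmt } with no trailing SourceLocation.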
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
class DefaultStmt : public SwitchCase {
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
SourceLocation SwitchCase::getEndLoc() const {
if (const auto *CS = dyn_cast<CaseStmt>(this))
return CS->getEndLoc();
else if (const auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getEndLoc();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
Stmt *SwitchCase::getSubStmt() {
if (auto *CS = dyn_cast<CaseStmt>(this))
return CS->getSubStmt();
else if (auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getSubStmt();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
LabelDecl *TheDecl;
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc(); }
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
Stmt *SubStmt;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc(); }
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which are
// optional. Note that it would be more convenient to put the optional
// trailing objects at the end but this would change the order of the children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
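// Editor's illustration (derived from the offsets below, not part of the
// original header): for "if (int x = f()) g(); else h();" (hasVarStorage()
// and hasElseStorage(), no init) the trailing objects are
// Stmt*[4] = { DeclStmt(x), cond, then, else } plus one SourceLocation for
// the "else" keyword.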
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
SourceLocation LParenLoc;
SourceLocation RParenLoc;
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
SourceLocation LPL, SourceLocation RPL, Stmt *Then,
SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}
/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
SourceLocation getElseLoc() const {
return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
void setElseLoc(SourceLocation ElseLoc) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
*getTrailingObjects<SourceLocation>() = ElseLoc;
}
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
/// If this is an 'if constexpr', determine which substatement will be taken.
/// Otherwise, or if the condition is value-dependent, returns None.
Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
bool isObjCAvailabilityCheck() const;
SourceLocation getBeginLoc() const { return getIfLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
if (getElse())
return getElse()->getEndLoc();
return getThen()->getEndLoc();
}
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
private llvm::TrailingObjects<SwitchStmt, Stmt *> {
friend TrailingObjects;
/// Points to a linked list of case and default statements.
SwitchCase *FirstCase;
// SwitchStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more convenient to
// put the optional trailing objects at the end but this would change
// the order in children().
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
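// Editor's illustration (derived from the offsets below): for
// "switch (int x = f()) { ... }" (hasVarStorage(), no init) the trailing
// objects are Stmt*[3] = { DeclStmt(x), cond, body }.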
enum { InitOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
SourceLocation LParenLoc;
SourceLocation RParenLoc;
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
/// Build a switch statement.
SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
SourceLocation LParenLoc, SourceLocation RParenLoc);
/// Build an empty switch statement.
explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);
public:
/// Create a switch statement.
static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
Expr *Cond, SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// Create an empty switch statement optionally with storage for
/// an init expression and a condition variable.
static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
bool HasVar);
/// True if this SwitchStmt has storage for an init statement.
bool hasInitStorage() const { return SwitchStmtBits.HasInit; }
/// True if this SwitchStmt has storage for a condition variable.
bool hasVarStorage() const { return SwitchStmtBits.HasVar; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This switch statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
/// Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<SwitchStmt *>(this)->getConditionVariable();
}
/// Set the condition variable in this switch statement.
/// The switch statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
SwitchCase *getSwitchCaseList() { return FirstCase; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
void setBody(Stmt *S, SourceLocation SL) {
setBody(S);
setSwitchLoc(SL);
}
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase() &&
"case/default already added to a switch");
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}
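// Editor's note (derived from addSwitchCase above): each new case is
// prepended to the list, so getSwitchCaseList() visits the case/default
// statements in reverse order of addition.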
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const {
return SwitchStmtBits.AllEnumCasesCovered;
}
SourceLocation getBeginLoc() const { return getSwitchLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody() ? getBody()->getEndLoc()
: reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
private llvm::TrailingObjects<WhileStmt, Stmt *> {
friend TrailingObjects;
// WhileStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more
// convenient to put the optional trailing object at the end
// but this would affect children().
// The trailing objects are in order:
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
//
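// For illustration: with hasVarStorage() == true the trailing array is
//   [0] DeclStmt* (condition variable), [1] Expr* (condition), [2] Stmt* (body)
// and with hasVarStorage() == false it is
//   [0] Expr* (condition), [1] Stmt* (body),
// matching varOffset(), condOffset() and bodyOffset() below.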
enum { VarOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
SourceLocation LParenLoc, RParenLoc;
unsigned varOffset() const { return VarOffset; }
unsigned condOffset() const { return VarOffset + hasVarStorage(); }
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasVarStorage();
}
/// Build a while statement.
WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
SourceLocation WL, SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// Build an empty while statement.
explicit WhileStmt(EmptyShell Empty, bool HasVar);
public:
/// Create a while statement.
static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
Stmt *Body, SourceLocation WL,
SourceLocation LParenLoc, SourceLocation RParenLoc);
/// Create an empty while statement optionally with storage for
/// a condition variable.
static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);
/// True if this WhileStmt has storage for a condition variable.
bool hasVarStorage() const { return WhileStmtBits.HasVar; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
/// Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<WhileStmt *>(this)->getConditionVariable();
}
/// Set the condition variable of this while statement.
/// The while statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getWhileLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
enum { BODY, COND, END_EXPR };
Stmt *SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
setCond(Cond);
setBody(Body);
setDoLoc(DL);
}
/// Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}
Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
const Expr *getCond() const {
return reinterpret_cast<Expr *>(SubExprs[COND]);
}
void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *Body) { SubExprs[BODY] = Body; }
SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getDoLoc(); }
SourceLocation getEndLoc() const { return getRParenLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
Stmt *getInit() { return SubExprs[INIT]; }
/// Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getForLoc(); }
SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
LabelDecl *Label;
SourceLocation LabelLoc;
public:
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
setGotoLoc(GL);
}
/// Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}
LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const { return getLabelLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
: Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
setTarget(target);
setGotoLoc(gotoLoc);
}
/// Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
const Expr *getTarget() const {
return reinterpret_cast<const Expr *>(Target);
}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
}
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target + 1); }
const_child_range children() const {
return const_child_range(&Target, &Target + 1);
}
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
setContinueLoc(CL);
}
/// Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
SourceLocation getBeginLoc() const { return getContinueLoc(); }
SourceLocation getEndLoc() const { return getContinueLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
setBreakLoc(BL);
}
/// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
SourceLocation getBeginLoc() const { return getBreakLoc(); }
SourceLocation getEndLoc() const { return getBreakLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
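/// For example (illustrative), GCC accepts both of the following:
/// \code
/// int f(void) { return; } // no value in a function returning int
/// void g(void) { return 4; } // a value in a function returning void
/// \endcode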
class ReturnStmt final
: public Stmt,
private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
friend TrailingObjects;
/// The return expression.
Stmt *RetExpr;
// ReturnStmt is followed optionally by a trailing "const VarDecl *"
// for the NRVO candidate. Present if and only if hasNRVOCandidate().
/// True if this ReturnStmt has storage for an NRVO candidate.
bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
return hasNRVOCandidate();
}
/// Build a return statement.
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
/// Build an empty return statement.
explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
/// Create a return statement.
static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
const VarDecl *NRVOCandidate);
/// Create an empty return statement, optionally with
/// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
/// Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const {
return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
: nullptr;
}
/// Set the variable that might be used for the named return value
/// optimization. The return statement must have storage for it,
/// which is the case if and only if hasNRVOCandidate() is true.
void setNRVOCandidate(const VarDecl *Var) {
assert(hasNRVOCandidate() &&
"This return statement has no storage for an NRVO candidate!");
*getTrailingObjects<const VarDecl *>() = Var;
}
SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
SourceLocation getBeginLoc() const { return getReturnLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr)
return child_range(&RetExpr, &RetExpr + 1);
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
if (RetExpr)
return const_child_range(&RetExpr, &RetExpr + 1);
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
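// Illustrative sketch (an assumed example, not prescribed by this header):
// the asm string "add %[foo], %0" decomposes into pieces roughly like
//   String("add "), Operand(foo), String(", "), Operand(0)
// with the named reference %[foo] flattened to its operand number.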
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, set DiagOffs and return a
/// nonzero diagnostic ID; otherwise return 0. This handles canonicalization
/// and translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns null if not defined.
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, by copy, or the
/// capture of a variable-length array type.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of a variable, of 'this', or of a
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variables captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicitly generated outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
omp-single-1.c | extern void abort (void);
int main (void)
{
int i = 0;
#pragma omp parallel shared (i)
{
#pragma omp single
{
i++;
}
}
if (i != 1)
abort ();
return 0;
}
|
VolumetricMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricMaxUnpooling.c"
#else
static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
THNN_CHECK_SHAPE_INDICES(input, indices);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 10,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input->dim() == 5)
{
dimt++;
dimw++;
dimh++;
dimn++;
}
int nslices = input->size(dimn);
if (gradOutput != NULL) {
if (oT != gradOutput->size(dimt) || oW != gradOutput->size(dimw) || oH != gradOutput->size(dimh))
{
THError(
"Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%dx%d",
oT, oH, oW, gradOutput->size(dimt), gradOutput->size(dimh), gradOutput->size(dimw)
);
}
THNN_CHECK_DIM_SIZE(gradOutput, input->dim(), dimn, nslices);
}
}
static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
real *input_p,
real *output_p,
THIndex_t *ind_p,
int nslices,
int iT,
int iW,
int iH,
int oT,
int oW,
int oH)
{
int k;
int has_error = 0;
THIndex_t error_index = 0;
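/* Invalid max indices are only recorded inside the parallel loop; THError
   is raised once after the loop, since throwing from inside an OpenMP
   worker thread would be unsafe. */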
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
real *output_p_k = output_p + k * oT * oH * oW;
real *input_p_k = input_p + k * iT * iH * iW;
THIndex_t *ind_p_k = ind_p + k * iT * iH * iW;
int t, i, j, index;
THIndex_t maxp;
for (t = 0; t < iT; t++)
{
for (i = 0; i < iH; i++)
{
for (j = 0; j < iW; j++)
{
index = t * iH * iW + i * iW + j;
maxp = ind_p_k[index] - TH_INDEX_BASE; /* retrieve position of max */
if (maxp < 0 || maxp >= oT * oW * oH)
{
#pragma omp critical
{
has_error = 1;
error_index = maxp;
}
} else {
output_p_k[maxp] = input_p_k[index]; /* update output */
}
}
}
}
}
if (has_error) {
THError(
"found an invalid max index %ld (output volumes are of size %dx%dx%d)",
error_index, oT, oH, oW
);
}
}
void THNN_(VolumetricMaxUnpooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int iT;
int iH;
int iW;
real *input_data;
real *output_data;
THIndex_t *indices_data;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, NULL, indices,
oT, oW, oH, dT, dW, dH, pT, pW, pH);
if (input->dim() == 5)
{
nbatch = input->size(0);
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size(dimt-1);
iT = input->size(dimt);
iH = input->size(dimh);
iW = input->size(dimw);
/* get contiguous input */
input = THTensor_(newContiguous)(input);
indices = THIndexTensor_(newContiguous)(indices);
/* resize output */
if (input->dim() == 4)
{
THTensor_(resize4d)(output, nslices, oT, oH, oW);
THTensor_(zero)(output);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THIndexTensor_(data)(indices);
THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
input_data, output_data,
indices_data,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
else
{
int p;
THTensor_(resize5d)(output, nbatch, nslices, oT, oH, oW);
THTensor_(zero)(output);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THIndexTensor_(data)(indices);
for (p = 0; p < nbatch; p++)
{
THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
input_data+p*nslices*iT*iW*iH,
output_data+p*nslices*oT*oW*oH,
indices_data+p*nslices*iT*iW*iH,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
}
/* cleanup */
THTensor_(free)(input);
THIndexTensor_(free)(indices);
}
static void THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
real *gradInput_p,
real *gradOutput_p,
THIndex_t *ind_p,
int nslices,
int iT,
int iW,
int iH,
int oT,
int oW,
int oH)
{
int k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
real *gradInput_p_k = gradInput_p + k * iT * iH * iW;
real *gradOutput_p_k = gradOutput_p + k * oT * oH * oW;
THIndex_t *ind_p_k = ind_p + k * iT * iH * iW;
int t, i, j, index;
THIndex_t maxp;
for (t = 0; t < iT; t++)
{
for (i = 0; i < iH; i++)
{
for (j = 0; j < iW; j++)
{
index = t * iH * iW + i * iW + j;
maxp = ind_p_k[index] - TH_INDEX_BASE; /* retrieve position of max */
if (maxp < 0 || maxp >= oT * oH * oW)
{
THError("invalid max index %ld, oT= %d, oW= %d, oH= %d", maxp, oT, oW, oH);
}
gradInput_p_k[index] = gradOutput_p_k[maxp]; /* update gradient */
}
}
}
}
}
void THNN_(VolumetricMaxUnpooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int iT;
int iH;
int iW;
real *gradInput_data;
real *gradOutput_data;
THIndex_t *indices_data;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, gradOutput, indices,
oT, oW, oH, dT, dW, dH, pT, pW, pH);
// TODO: check gradOutput shape
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
indices = THIndexTensor_(newContiguous)(indices);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->dim() == 5)
{
nbatch = input->size(0);
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size(dimt-1);
iT = input->size(dimt);
iH = input->size(dimh);
iW = input->size(dimw);
/* get raw pointers */
gradInput_data = THTensor_(data)(gradInput);
gradOutput_data = THTensor_(data)(gradOutput);
indices_data = THIndexTensor_(data)(indices);
/* backprop */
if (input->dim() == 4)
{
THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
gradInput_data, gradOutput_data,
indices_data,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
else
{
int p;
for (p = 0; p < nbatch; p++)
{
THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
gradInput_data+p*nslices*iT*iW*iH,
gradOutput_data+p*nslices*oT*oW*oH,
indices_data+p*nslices*iT*iW*iH,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
}
/* cleanup */
THTensor_(free)(gradOutput);
THIndexTensor_(free)(indices);
}
#endif
|
DRB034-truedeplinear-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A linear expression is used as the array subscript.
Data race pair: a[2*i+1]@66:5 vs. a[i]@66:14
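For example, iteration i=1 writes a[3] while iteration i=3 reads a[3],
so the parallelized loop carries a true (flow) dependence.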
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
omprace_init();
int i;
int len=2000;
if (argc>1)
len = atoi(argv[1]);
int a[len];
for (i=0; i<len; i++)
a[i]=i;
#pragma omp parallel for
for (i=0;i<len/2;i++)
a[2*i+1]=a[i]+1;
omprace_fini();
return 0;
}
|
avx2-vect-aggressive.c | /* { dg-do run } */
/* { dg-require-effective-target avx2 } */
/* { dg-options "-mavx2 -O3 -fopenmp-simd -fdump-tree-vect-details" } */
#include "avx2-check.h"
#define N 64
float a[N];
int c[N];
__attribute__ ((noinline)) int
foo ()
{
int i, res = 0;
#pragma omp simd safelen(8)
for (i=0; i<N; i++)
{
float t = a[i];
if (t > 0.0f & t < 1.0e+2f)
if (c[i] != 0)
res += 1;
}
return res;
}
__attribute__ ((noinline)) float
hundred ()
{
return 100.0f;
}
static void
avx2_test (void)
{
int i, res;
for (i=0; i<N; i++)
{
c[i] = i % 4;
if (i < N / 2)
a[i] = (float) (i + 1);
else
a[i] = (float) i + hundred ();
}
if (foo () != 24)
abort ();
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
|
6.race1.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
// Taken from ompVerify, Fig. 1
#include <omp.h>
#define N 20
int main() {
int x[N], b[N], L[N][N];
#pragma omp parallel for
for (int i = 0; i < N; i++) {
x[i] = b[i];
for (int j = 0; j < i; j++)
x[i] = x[i] - L[i][j] * x[j];
x[i] = x[i] / L[i][i];
}
}
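// The race: iteration i reads x[j] for j < i while iteration j, possibly
// running on another thread, may still be writing x[j]; this forward
// substitution carries a loop-carried (true) dependence.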
// CHECK: Data Race detected
// END
|
bli_dotv_bgq_int.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
void bli_ddotv_bgq_int
(
conj_t conjx,
conj_t conjy,
dim_t n,
double* restrict x, inc_t incx,
double* restrict y, inc_t incy,
double* restrict rho,
cntx_t* cntx
)
{
bool use_ref = FALSE;
// If the vector lengths are zero, set rho to zero and return.
if ( bli_zero_dim1( n ) ) {
PASTEMAC(d,set0s)( *rho );
return;
}
// If there is anything that would interfere with our use of aligned
// vector loads/stores, call the reference implementation.
if ( incx != 1 || incy != 1 || bli_is_unaligned_to( ( siz_t )x, 32 ) || bli_is_unaligned_to( ( siz_t )y, 32 ) )
use_ref = TRUE;
// Call the reference implementation if needed.
if ( use_ref ) {
BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho, cntx );
return;
}
dim_t n_run = n / 4;
dim_t n_left = n % 4;
double rhos = 0.0;
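// Each thread walks the 4-element QPX chunks with a round-robin stride of
// n_threads, accumulating into a private vector register; the per-thread
// scalar partial sums are then combined by the OpenMP reduction on rhos.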
#pragma omp parallel reduction(+:rhos)
{
dim_t n_threads;
dim_t t_id = omp_get_thread_num();
n_threads = omp_get_num_threads();
vector4double rhov = vec_splats( 0.0 );
vector4double xv, yv;
for ( dim_t i = t_id; i < n_run; i += n_threads )
{
xv = vec_lda( 0 * sizeof(double), &x[i*4] );
yv = vec_lda( 0 * sizeof(double), &y[i*4] );
rhov = vec_madd( xv, yv, rhov );
}
rhos += vec_extract( rhov, 0 );
rhos += vec_extract( rhov, 1 );
rhos += vec_extract( rhov, 2 );
rhos += vec_extract( rhov, 3 );
}
for ( dim_t i = 0; i < n_left; i++ )
{
rhos += x[4*n_run + i] * y[4*n_run + i];
}
*rho = rhos;
}
|
nbody-soa.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#define SOFTENING 1e-9f
typedef struct { float *x, *y, *z, *vx, *vy, *vz; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
void bodyForce(BodySystem p, float dt, int n) {
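// All-pairs O(n^2) force accumulation; SOFTENING keeps the inverse
// distance finite when i == j (dx = dy = dz = 0).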
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < n; i++) {
float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
for (int j = 0; j < n; j++) {
float dy = p.y[j] - p.y[i];
float dz = p.z[j] - p.z[i];
float dx = p.x[j] - p.x[i];
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = 1.0f / sqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
}
p.vx[i] += dt*Fx; p.vy[i] += dt*Fy; p.vz[i] += dt*Fz;
}
}
int main(const int argc, const char** argv) {
int nBodies = 30000;
if (argc > 1) nBodies = atoi(argv[1]);
const float dt = 0.01f; // time step
const int nIters = 10; // simulation iterations
int bytes = 6*nBodies*sizeof(float);
float *buf = (float*)malloc(bytes);
BodySystem p;
p.x = buf+0*nBodies; p.y = buf+1*nBodies; p.z = buf+2*nBodies;
p.vx = buf+3*nBodies; p.vy = buf+4*nBodies; p.vz = buf+5*nBodies;
randomizeBodies(buf, 6*nBodies); // Init pos / vel data
double totalTime = 0.0;
for (int iter = 1; iter <= nIters; iter++) {
StartTimer();
bodyForce(p, dt, nBodies); // compute interbody forces
for (int i = 0 ; i < nBodies; i++) { // integrate position
p.x[i] += p.vx[i]*dt;
p.y[i] += p.vy[i]*dt;
p.z[i] += p.vz[i]*dt;
}
const double tElapsed = GetTimer() / 1000.0;
if (iter > 1) { // First iter is warm up
totalTime += tElapsed;
}
#ifndef SHMOO
printf("Iteration %d: %.3f seconds\n", iter, tElapsed);
#endif
}
double avgTime = totalTime / (double)(nIters-1);
#ifdef SHMOO
printf("%d, %0.3f\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#else
printf("Average rate for iterations 2 through %d: %.3f +- %.3f steps per second.\n",
nIters, rate);
printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#endif
free(buf);
}
|
ParallelOpenMP.h | #pragma once
#include <ATen/ATen.h>
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <exception>
#ifdef _OPENMP
#define INTRA_OP_PARALLEL
#include <omp.h>
#endif
namespace at {
template <class F>
inline void parallel_for(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const F& f) {
TORCH_CHECK(grain_size >= 0);
at::internal::lazy_init_num_threads();
if (begin >= end) {
return;
}
#ifdef _OPENMP
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
// Work around memory leak when using 1 thread in nested "omp parallel"
// caused by some buggy OpenMP versions and the fact that omp_in_parallel()
// returns false when omp_get_max_threads() == 1 inside nested "omp parallel"
// See issue gh-32284
#pragma omp parallel if (omp_get_max_threads() > 1 && !omp_in_parallel() && ((end - begin) > grain_size))
{
// choose number of tasks based on grain size and number of threads
// can't use num_threads clause due to bugs in GOMP's thread pool (See #32008)
int64_t num_threads = omp_get_num_threads();
if (grain_size > 0) {
num_threads = std::min(num_threads, divup((end - begin), grain_size));
}
int64_t tid = omp_get_thread_num();
int64_t chunk_size = divup((end - begin), num_threads);
int64_t begin_tid = begin + tid * chunk_size;
if (begin_tid < end) {
try {
f(begin_tid, std::min(end, chunk_size + begin_tid));
} catch (...) {
if (!err_flag.test_and_set()) {
eptr = std::current_exception();
}
}
}
}
if (eptr) {
std::rethrow_exception(eptr);
}
#else
f(begin, end);
#endif
}
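// Usage sketch (hypothetical caller, not part of this header): bump every
// element of a raw buffer in parallel, in chunks of at least 2048 elements.
//   at::parallel_for(0, n, /*grain_size=*/2048, [&](int64_t b, int64_t e) {
//     for (int64_t i = b; i < e; ++i) data[i] += 1.0f;
//   });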
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const scalar_t ident,
const F& f,
const SF& sf) {
TORCH_CHECK(grain_size >= 0);
at::internal::lazy_init_num_threads();
if (begin >= end) {
return ident;
} else if (in_parallel_region() || get_num_threads() == 1) {
return f(begin, end, ident);
} else {
const int64_t num_results = divup((end - begin), grain_size);
std::vector<scalar_t> results(num_results);
scalar_t* results_data = results.data();
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
#pragma omp parallel for if ((end - begin) >= grain_size)
for (int64_t id = 0; id < num_results; id++) {
int64_t i = begin + id * grain_size;
try {
results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
} catch (...) {
if (!err_flag.test_and_set()) {
eptr = std::current_exception();
}
}
}
if (eptr) {
std::rethrow_exception(eptr);
}
scalar_t result = ident;
for (auto partial_result : results) {
result = sf(result, partial_result);
}
return result;
}
}
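// Usage sketch (hypothetical caller): parallel sum of a float buffer;
// per-chunk partial sums are combined with std::plus.
//   float total = at::parallel_reduce(0, n, /*grain_size=*/2048, 0.0f,
//       [&](int64_t b, int64_t e, float acc) {
//         for (int64_t i = b; i < e; ++i) acc += data[i];
//         return acc;
//       },
//       std::plus<float>());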
} // namespace at
|
affinity.c | // Normal compile
// Intel:
// mpiicc -qopenmp phostone.c -o phostone
// gcc:
// mpicc -qopenmp phostone.c -o phostone
//
// To compile without openmp
// Intel:
// mpiicc -qopenmp-stubs phostone.c -o purempi
// gcc:
// mpicc -DSTUBS phostone.c -o purempi
//
//
#include <ctype.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <utmpx.h>
#define MPI_Wtime omp_get_wtime
// which processor on a node will
// print env if requested
#ifndef PID
#define PID 0
#endif
void dothreads(int full, char *myname, int myid, int mycolor, int new_id);
char *trim(char *s);
void slowit(long nints, int val);
int sched_getcpu();
void ptime() {
time_t rawtime;
struct tm *timeinfo;
char buffer[80];
time(&rawtime);
timeinfo = localtime(&rawtime);
strftime(buffer, 80, "%c", timeinfo);
// puts (buffer);
printf("%s\n", buffer);
}
int findcore() {
int cpu;
#ifdef __APPLE__
cpu = -1;
#else
cpu = sched_getcpu();
#endif
return cpu;
}
int str_upr(char *cstr) {
char *str = cstr;
for (; *str; str++) {
if (isalpha(*str))
if (*str >= 'a') {
*str += 'A' - 'a';
}
}
return 0;
}
int str_low(char *cstr) {
char *str = cstr;
for (; *str; str++) {
if (isalpha(*str))
if (*str < 'a') {
*str += 'a' - 'A';
}
}
return 0;
}
void dohelp();
void dohelp() {
/************************************************************
* This is a glorified hello world program. Each processor
* prints name, rank, and other information as described below.
* ************************************************************/
printf("phostname arguments:\n");
printf(" -h : Print this help message\n");
printf("\n");
printf("no arguments : Print a list of the nodes on which the command is "
"run.\n");
printf("\n");
printf(" -f or -1 : Same as no argument but print MPI task id and Thread "
"id\n");
printf(" If run with OpenMP threading enabled OMP_NUM_THREADS "
"> 1\n");
printf(" there will be a line per MPI task and Thread.\n");
printf("\n");
printf(" -F or -2 : Add columns to tell first MPI task on a node and and "
"the\n");
printf(" numbering of tasks on a node. (Hint: pipe this output "
"in\n");
printf(" to sort -r\n");
printf("\n");
printf(" -E or -B : Print thread info at 'E'nd of the run or 'B'oth the "
"start and end\n");
printf("\n");
printf(" -a : Print a listing of the environmental variables passed "
"to\n");
printf(" MPI task. (Hint: use the -l option with SLURM to "
"prepend MPI\n");
printf(" task #.)\n");
printf("\n");
printf(" -s ######## : Where ######## is an integer. Sum a bunch on "
"integers to slow\n");
printf(" down the program. Should run faster with multiple "
"threads.\n");
printf("\n");
printf(" -t ######## : Where is a time in seconds. Sum a bunch on integers "
"to slow\n");
printf(" down the program and run for at least the given "
"seconds.\n");
printf("\n");
printf(" -T : Print time/date at the beginning/end of the run.\n");
printf("\n");
printf(" This version is pure OpenMP, no MPI. All outputs are the same as "
"the hybrid\n");
printf(" version but we have the 'hardwired' values of Comm_rank=0 "
"Comm_size=1\n");
printf("\n");
}
/* valid is used to get around an issue in some versions of
* MPI that screw up the environment passed to programs. Its
* usage is not recommended. See:
* https://wiki.sei.cmu.edu/confluence/display/c/MEM10-C.+Define+and+use+a+pointer+validation+function
*
* "The valid() function does not guarantee validity; it only
* identifies null pointers and pointers to functions as invalid.
* However, it can be used to catch a substantial number of
* problems that might otherwise go undetected."
*/
int valid(void *p) {
extern char _etext;
return (p != NULL) && ((char *)p > &_etext);
}
char f1234[128], f1235[128], f1236[128];
int main(int argc, char **argv, char *envp[]) {
char *eql;
int myid, numprocs, resultlen;
int mycolor, new_id, new_nodes;
int i, k;
char lname[256];
//#ifdef MPI_MAX_LIBRARY_VERSION_STRING
char version[256];
//#else
// char version[40];
//#endif
char *myname, *cutit;
int full, envs, iarg, tn, nt, help, slow, vlan, wait, dotime, when;
long nints;
double t1, t2, dt;
/* Format statements */
// char *f1234="%4.4d %4.4d %18s %4.4d %4.4d
// %4.4d\n"; char *f1235="%s %4.4d %4.4d\n"; char *f1236="%s\n";
strcpy(f1234, "%4.4d %4.4d %18s %4.4d %4.4d %4.4d\n");
strcpy(f1235, "%s %4.4d %4.4d\n");
strcpy(f1236, "%s\n");
//#ifdef MPI_MAX_LIBRARY_VERSION_STRING
sprintf(version, "pure OpenMP");
//#else
// sprintf(version,"%s","UNDEFINED - consider upgrading");
//#endif
numprocs = 1;
myid = 0;
sprintf(lname, "n1234");
gethostname(lname, 256);
/* Get rid of "stuff" from the processor name. */
myname = trim(lname);
/* The next line is required for BGQ because the MPI task ID
is encoded in the processor name and we don't want it. */
if (strrchr(myname, 32))
myname = strrchr(myname, 32);
/* Here we cut off the tail of node name, Summit in this case */
cutit = strstr(myname, ".rc.int.colorado.edu");
if (cutit)
cutit[0] = (char)0;
slow = 0;
wait = 0;
/* read in command line args from task 0 */
if (myid == 0) {
full = 0;
envs = 0;
help = 0;
dotime = 0;
when = 1;
if (argc > 1) {
for (iarg = 1; iarg < argc; iarg++) {
if ((strcmp(argv[iarg], "-h") == 0) ||
(strcmp(argv[iarg], "--h") == 0) ||
(strcmp(argv[iarg], "-help") == 0))
help = 1;
/**/
if ((strcmp(argv[iarg], "-f") == 0) || (strcmp(argv[iarg], "-1") == 0))
full = 1;
/**/
if ((strcmp(argv[iarg], "-F") == 0) || (strcmp(argv[iarg], "-2") == 0))
full = 2;
/**/
if (strcmp(argv[iarg], "-s") == 0)
slow = 1;
/**/
if (strcmp(argv[iarg], "-t") == 0)
wait = 1;
/**/
if (strcmp(argv[iarg], "-a") == 0)
envs = 1;
/**/
if (strcmp(argv[iarg], "-T") == 0)
dotime = 1;
if (strcmp(argv[iarg], "-B") == 0)
when = 3;
if (strcmp(argv[iarg], "-E") == 0)
when = 2;
}
}
}
/* send info to all tasks, if doing help doit and quit */
if (help == 1) {
if (myid == 0)
dohelp();
exit(0);
}
if (myid == 0 && dotime == 1)
ptime();
if (myid == 0 && full == 2) {
printf("%s\n", version);
printf("task thread node name first task # on node "
"core\n");
}
/*********/
/* The routine NODE_COLOR will return the same value for all mpi
tasks that are running on the same node. We use this to create
a new communicator from which we get the numbering of tasks on
a node. */
// NODE_COLOR(&mycolor);
mycolor = 0;
new_id = 0;
new_nodes = 1;
tn = -1;
nt = -1;
/* Here we print out the information with the format and
verbosity determined by the value of full. We do this
a task at a time to "hopefully" get a bit better formatting. */
for (i = 0; i < numprocs; i++) {
if (i != myid)
continue;
if (when == 3)
str_low(myname);
if (when != 2)
dothreads(full, myname, myid, mycolor, new_id);
/* here we print out the environment in which a MPI task is running */
/* We try to determine if the passed environment is valid but sometimes
* it just does not work and this can crash. Try taking out myid==0
* and setting PID to a nonzero value.
*/
// if (envs == 1 && new_id==1) {
if (envs == 1 && (myid == PID || myid == 0)) {
k = 0;
if (valid(envp) == 1) {
// while(envp[k]) {
while (valid(envp[k]) == 1) {
if (strlen(envp[k]) > 3) {
eql = strchr(envp[k], '=');
if (eql == NULL)
break;
printf("? %d %s\n", myid, envp[k]);
} else {
break;
}
// printf("? %d %d\n",myid,k);
k++;
}
} else {
printf("? %d %s\n", myid, "Environmnet not set");
}
}
}
if (myid == 0) {
dt = 0;
if (wait) {
slow = 0;
for (iarg = 1; iarg < argc; iarg++) {
// printf("%s\n",argv[iarg]);
if (atof(argv[iarg]) > 0)
dt = atof(argv[iarg]);
}
}
}
if (dt > 0) {
nints = 100000;
t1 = MPI_Wtime();
t2 = t1;
while (dt > t2 - t1) {
for (i = 1; i <= 1000; i++) {
slowit(nints, i);
}
t2 = MPI_Wtime();
}
if (myid == 0)
printf("total time %10.3f\n", t2 - t1);
nints = 0;
}
if (myid == 0) {
nints = 0;
if (slow == 1) {
for (iarg = 1; iarg < argc; iarg++) {
if (atol(argv[iarg]) > 0)
nints = atol(argv[iarg]);
}
}
}
if (nints > 0) {
t1 = MPI_Wtime();
for (i = 1; i <= 1000; i++) {
slowit(nints, i);
}
t2 = MPI_Wtime();
if (myid == 0)
printf("total time %10.3f\n", t2 - t1);
}
if (myid == 0 && dotime == 1)
ptime();
if (when > 1) {
for (i = 0; i < numprocs; i++) {
if (i != myid)
continue;
if (when == 3)
str_upr(myname);
dothreads(full, myname, myid, mycolor, new_id);
}
}
return 0;
}
char *trim(char *s) {
int i = 0;
int j = strlen(s) - 1;
int k = 0;
while (isspace(s[i]) && s[i] != '\0')
i++;
while (j >= 0 && isspace(s[j]))
j--;
while (i <= j)
s[k++] = s[i++];
s[k] = '\0';
return s;
}
/*
! return a integer which is unique to all mpi
! tasks running on a particular node. It is
! equal to the id of the first MPI task running
! on a node. This can be used to create
! MPI communicators which only contain tasks on
! a node.
*/
#include <string.h>
int node_color() { return 0; }
void slowit(long nints, int val) {
int *block;
long i, sum;
#ifdef VERBOSET
double t2, t1;
t1 = MPI_Wtime();
#endif
block = (int *)malloc(nints * sizeof(int));
#pragma omp parallel for
for (i = 0; i < nints; i++) {
block[i] = val;
}
sum = 0;
#pragma omp parallel for reduction(+ : sum)
for (i = 0; i < nints; i++) {
sum = sum + block[i];
}
#ifdef VERBOSET
t2 = MPI_Wtime();
printf("sum of integers %ld %10.3f\n", sum, t2 - t1);
#endif
free(block);
}
#ifdef STUBS
int omp_get_thread_num(void) { return 0; }
int omp_get_num_threads(void) { return 1; }
#endif
void dothreads(int full, char *myname, int myid, int mycolor, int new_id) {
int nt, tn;
#pragma omp parallel private(nt, tn)
{
nt = omp_get_num_threads();
if (nt == 0)
nt = 1;
#pragma omp critical
{
if (nt < 2) {
nt = 1;
tn = 0;
} else {
tn = omp_get_thread_num();
}
if (full == 0) {
if (tn == 0)
printf(f1236, trim(myname));
}
if (full == 1) {
printf(f1235, trim(myname), myid, tn);
}
if (full == 2) {
printf(f1234, myid, tn, trim(myname), mycolor, new_id, findcore());
}
}
}
}
|
Layer_Im2Mat.h | /*
* Layers.h
* rl
*
* Created by Guido Novati on 11.02.16.
* Copyright 2016 ETH Zurich. All rights reserved.
*
*/
#pragma once
#include "Layers.h"
// Im2MatLayer gets as input an image of sizes InX * InY * InC
// and prepares the output for convolution with a filter of size KnY * KnX * KnC
// and output an image of size OpY * OpX * KnC
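// For reference, the output spatial sizes are expected to follow the usual
// convolution arithmetic (an assumption about the instantiating code, not
// enforced here):
//   OpX = (InX + 2*Px - KnX) / Sx + 1
//   OpY = (InY + 2*Py - KnY) / Sy + 1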
template
<
int InX, int InY, int InC, //input image: x:width, y:height, c:color channels
int KnX, int KnY, int KnC, //filter: x:width, y:height, c:color channels
int Sx, int Sy, // stride x/y
int Px, int Py, // padding x/y
int OpX, int OpY //output img: x:width, y:height, same color channels as KnC
>
struct Im2MatLayer: public Layer
{
//Im2ColLayer has no parameters:
Params* allocate_params() const override { return nullptr; }
Im2MatLayer(const int _ID) : Layer(OpY*OpX*KnY*KnX*InC, _ID) {
static_assert(Sx> 0 && Sy> 0, "Invalid stride");
static_assert(Px>=0 && Py>=0, "Invalid padding");
print();
}
void print() {
printf("(%d) Im2Col transform Img:[%d %d %d] to Mat:[%d %d %d %d %d] ",
ID, InY,InX,InC, OpY,OpX,KnY,KnX,InC);
printf("with Stride:[%d %d] and Padding:[%d %d]\n",Sx,Sy,Px,Py);
}
void forward(const std::vector<Activation*>& act,
const std::vector<Params*>& param) const override
{
const int batchSize = act[ID]->batchSize;
assert(act[ID-1]->layersSize == InX * InY * InC);
assert(act[ID]->layersSize == OpY * OpX * KnY * KnX * InC);
Im2Mat(batchSize, act[ID-1]->output, act[ID]->output);
}
void bckward(const std::vector<Activation*>& act,
const std::vector<Params*>& param,
const std::vector<Params*>& grad) const override
{
const int batchSize = act[ID]->batchSize;
assert(act[ID-1]->layersSize == InX * InY * InC);
assert(act[ID]->layersSize == OpY * OpX * KnY * KnX * InC);
Mat2Im(batchSize, act[ID]->dError_dOutput, act[ID-1]->dError_dOutput);
}
void Im2Mat(const int BS, const Real*const lin_inp, Real*const lin_out) const
{
using InputImages = Real[][InY][InX][InC];
using OutputMatrices = Real[][OpY][OpX][KnY][KnX][InC];
// Convert pointers to a reference to multi dim arrays for easy access:
// 1) INP is a reference: i'm not creating new data
// 2) The type of INP is an array of sizes [???][InY][InX][InC]
// 3) The first dimension is the batchsize and is not known at compile time
// 4) Because it's the slowest index the compiler does not complain
// 5) The conversion should be read from right to left: (A) convert lin_inp
// to pointer to a static multi-array of size [???][InY][InX][InC]
// (B) Return the reference of the memory space pointed at by a.
const InputImages & INP = * (InputImages*) lin_inp;
// (B)( A )
OutputMatrices & OUT = * (OutputMatrices*) lin_out;
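// The same cast trick in isolation (illustrative sizes, not from this layer):
//   using Img = Real[][8][8][3];  // batch dim unknown at compile time
//   Img& IMG = *(Img*)lin;        // IMG[b][y][x][c] indexes lin with no copy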
// Zero lin_out first: padded positions are never written by the gather loop below, so they must start out cleared.
#if 1
#pragma omp parallel for collapse(6) schedule(static)
for (int bc=0; bc<BS; bc++)
for (int oy = 0; oy < OpY; oy++)
for (int ox = 0; ox < OpX; ox++)
for (int fy = 0; fy < KnY; fy++)
for (int fx = 0; fx < KnX; fx++)
for (int ic = 0; ic < InC; ic++)
OUT[bc][oy][ox][fy][fx][ic] = 0;
#else
memset(lin_out, 0, BS * OpY * OpX * KnY * KnX * InC * sizeof(Real) );
#endif
// printf("TO CHECK: Im2MatLayer::Im2Mat\n");
#pragma omp parallel for collapse(5) schedule(static)
for (int bc= 0; bc < BS; bc++) // Loop over minibatch items ]
for (int oy= 0; oy < OpY; oy++) // Loop over output Y ] OUT rows
for (int ox= 0; ox < OpX; ox++) // Loop over output X ]
{
for (int fy= 0; fy < KnY; fy++) // Loop over filter Y }
for (int fx= 0; fx < KnX; fx++) // Loop over filter X } OUT cols
{
const int iy{oy * Sy - Py + fy}; // Translate filter y idx to image y idx
const int ix{ox * Sx - Px + fx}; // Translate filter x idx to image x idx
if (0 <= iy && iy < InY && 0 <= ix && ix < InX) // Act only where no padding
{
for (int ic= 0; ic < InC; ic++) // Loop over input channels }
OUT[bc][oy][ox][fy][fx][ic]= INP[bc][iy][ix][ic];
}
}
}
}
void Mat2Im(const int BS, const Real*const lin_inp, Real*const lin_out) const
{
using InputImages = Real[][InY][InX][InC];
using OutputMatrices = Real[][OpY][OpX][KnY][KnX][InC];
// Output is d Loss d Input, same size as INP before:
InputImages & dLdINP = * (InputImages*) lin_out;
// Input is d Loss d Output, same size as OUT before:
const OutputMatrices & dLdOUT = * (OutputMatrices*) lin_inp;
// Mat2Im accesses memory with plus equal: reset field
#if 1
#pragma omp parallel for collapse(4) schedule(static)
for (int bc=0; bc<BS; bc++)
for (int iy = 0; iy < InY; iy++)
for (int ix = 0; ix < InX; ix++)
for (int ic = 0; ic < InC; ic++)
dLdINP[bc][iy][ix][ic] = 0;
#else
memset(lin_out, 0, BS * InY * InX * InC * sizeof(Real) );
#endif
// printf("TO CHECK: Im2MatLayer::Mat2Im\n");
#pragma omp parallel for collapse(5) schedule(static)
for (int bc= 0; bc < BS; bc++) // Loop over minibatch items ]
for (int oy= 0; oy < OpY; oy++) // Loop over output Y ] OUT rows
for (int ox= 0; ox < OpX; ox++) // Loop over output X ]
{
for (int fy= 0; fy < KnY; fy++) // Loop over filter Y }
for (int fx= 0; fx < KnX; fx++) // Loop over filter X } OUT cols
{
const int iy{oy * Sy - Py + fy}; // Translate filter y idx to image y idx
const int ix{ox * Sx - Px + fx}; // Translate filter x idx to image x idx
if (0 <= iy && iy < InY && 0 <= ix && ix < InX) // Act only where no padding
{
for (int ic= 0; ic < InC; ic++) // Loop over input channels }
dLdINP[bc][iy][ix][ic] += dLdOUT[bc][oy][ox][fy][fx][ic];
}
}
}
}
void init(std::mt19937& G, const std::vector<Params*>& P) const override { }
};
|
non_local_operator.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file non_local_operator.h
*
* \brief Contains declaration and implementation of sirius::Non_local_operator class.
*/
#ifndef __NON_LOCAL_OPERATOR_H__
#define __NON_LOCAL_OPERATOR_H__
#include "Beta_projectors/beta_projectors.h"
#include "simulation_context.h"
namespace sirius {
template <typename T>
class Non_local_operator
{
protected:
Simulation_context const& ctx__;
device_t pu_;
int packed_mtrx_size_;
mdarray<int, 1> packed_mtrx_offset_;
/// Non-local operator matrix.
mdarray<T, 2> op_;
mdarray<T, 1> work_;
bool is_null_{false};
Non_local_operator& operator=(Non_local_operator const& src) = delete;
Non_local_operator(Non_local_operator const& src) = delete;
public:
Non_local_operator(Simulation_context const& ctx_)
: ctx__(ctx_)
{
PROFILE("sirius::Non_local_operator::Non_local_operator");
pu_ = this->ctx__.processing_unit();
auto& uc = this->ctx__.unit_cell();
packed_mtrx_offset_ = mdarray<int, 1>(uc.num_atoms());
packed_mtrx_size_ = 0;
for (int ia = 0; ia < uc.num_atoms(); ia++) {
int nbf = uc.atom(ia).mt_basis_size();
packed_mtrx_offset_(ia) = packed_mtrx_size_;
packed_mtrx_size_ += nbf * nbf;
}
if (pu_ == GPU) {
packed_mtrx_offset_.allocate(memory_t::device);
packed_mtrx_offset_.template copy<memory_t::host, memory_t::device>();
}
}
~Non_local_operator()
{
}
inline void apply(int chunk__,
int ispn_block__,
Wave_functions& op_phi__,
int idx0__,
int n__,
Beta_projectors& beta_,
matrix<T>& beta_phi__);
inline T operator()(int xi1__, int xi2__, int ia__)
{
return (*this)(xi1__, xi2__, 0, ia__);
}
inline T operator()(int xi1__, int xi2__, int ispn__, int ia__)
{
int nbf = this->ctx__.unit_cell().atom(ia__).mt_basis_size();
return op_(packed_mtrx_offset_(ia__) + xi2__ * nbf + xi1__, ispn__);
}
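/* Storage sketch (a reading of the code above, not an API guarantee): each
   atom ia owns a packed nbf*nbf block starting at packed_mtrx_offset_(ia);
   element (xi1, xi2) of that block lives at offset xi2 * nbf + xi1, with one
   column of op_ per spin component. */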
};
template <>
inline void Non_local_operator<double_complex>::apply(int chunk__,
int ispn_block__,
Wave_functions& op_phi__,
int idx0__,
int n__,
Beta_projectors& beta_,
matrix<double_complex>& beta_phi__)
{
PROFILE("sirius::Non_local_operator::apply");
if (is_null_) {
return;
}
int jspn = ispn_block__ & 1;
auto& beta_gk = beta_.pw_coeffs_a();
int num_gkvec_loc = beta_.num_gkvec_loc();
int nbeta = beta_.chunk(chunk__).num_beta_;
if (static_cast<size_t>(nbeta * n__) > work_.size()) {
work_ = mdarray<double_complex, 1>(nbeta * n__);
if (pu_ == GPU) {
work_.allocate(memory_t::device);
}
}
/* compute O * <beta|phi> for atoms in a chunk */
#pragma omp parallel for
for (int i = 0; i < beta_.chunk(chunk__).num_atoms_; i++) {
/* number of beta functions for a given atom */
int nbf = beta_.chunk(chunk__).desc_(beta_desc_idx::nbf, i);
int offs = beta_.chunk(chunk__).desc_(beta_desc_idx::offset, i);
int ia = beta_.chunk(chunk__).desc_(beta_desc_idx::ia, i);
switch (pu_) {
case CPU: {
linalg<CPU>::gemm(0, 0, nbf, n__, nbf, op_.at<CPU>(packed_mtrx_offset_(ia), ispn_block__), nbf,
beta_phi__.at<CPU>(offs, 0), nbeta, work_.at<CPU>(offs), nbeta);
break;
}
case GPU: {
#ifdef __GPU
linalg<GPU>::gemm(0, 0, nbf, n__, nbf, op_.at<GPU>(packed_mtrx_offset_(ia), ispn_block__), nbf,
beta_phi__.at<GPU>(offs, 0), nbeta, work_.at<GPU>(offs), nbeta, omp_get_thread_num());
#endif
break;
}
}
}
/* compute <G+k|beta> * O * <beta|phi> and add to op_phi */
switch (pu_) {
case CPU: {
linalg<CPU>::gemm(0, 0, num_gkvec_loc, n__, nbeta, linalg_const<double_complex>::one(), beta_gk.at<CPU>(),
num_gkvec_loc, work_.at<CPU>(), nbeta, linalg_const<double_complex>::one(),
op_phi__.pw_coeffs(jspn).prime().at<CPU>(0, idx0__),
op_phi__.pw_coeffs(jspn).prime().ld());
break;
}
case GPU: {
#ifdef __GPU
/* wait for previous zgemms */
#pragma omp parallel
acc::sync_stream(omp_get_thread_num());
linalg<GPU>::gemm(0, 0, num_gkvec_loc, n__, nbeta, &linalg_const<double_complex>::one(), beta_gk.at<GPU>(),
beta_gk.ld(), work_.at<GPU>(), nbeta, &linalg_const<double_complex>::one(),
op_phi__.pw_coeffs(jspn).prime().at<GPU>(0, idx0__),
op_phi__.pw_coeffs(jspn).prime().ld());
acc::sync_stream(-1);
#endif
break;
}
}
}
template <>
inline void Non_local_operator<double>::apply(int chunk__,
int ispn_block__,
Wave_functions& op_phi__,
int idx0__,
int n__,
Beta_projectors& beta_,
matrix<double>& beta_phi__)
{
PROFILE("sirius::Non_local_operator::apply");
if (is_null_) {
return;
}
int jspn = ispn_block__ & 1;
auto& beta_gk = beta_.pw_coeffs_a();
int num_gkvec_loc = beta_.num_gkvec_loc();
int nbeta = beta_.chunk(chunk__).num_beta_;
if (static_cast<size_t>(nbeta * n__) > work_.size()) {
work_ = mdarray<double, 1>(nbeta * n__);
if (pu_ == GPU) {
work_.allocate(memory_t::device);
}
}
/* compute O * <beta|phi> */
#pragma omp parallel for
for (int i = 0; i < beta_.chunk(chunk__).num_atoms_; i++) {
/* number of beta functions for a given atom */
int nbf = beta_.chunk(chunk__).desc_(beta_desc_idx::nbf, i);
int offs = beta_.chunk(chunk__).desc_(beta_desc_idx::offset, i);
int ia = beta_.chunk(chunk__).desc_(beta_desc_idx::ia, i);
switch (pu_) {
case CPU: {
linalg<CPU>::gemm(0, 0, nbf, n__, nbf, op_.at<CPU>(packed_mtrx_offset_(ia), ispn_block__), nbf,
beta_phi__.at<CPU>(offs, 0), nbeta, work_.at<CPU>(offs), nbeta);
break;
}
case GPU: {
#ifdef __GPU
linalg<GPU>::gemm(0, 0, nbf, n__, nbf, op_.at<GPU>(packed_mtrx_offset_(ia), ispn_block__), nbf,
beta_phi__.at<GPU>(offs, 0), nbeta, work_.at<GPU>(offs), nbeta, omp_get_thread_num());
#endif
break;
}
}
}
/* compute <G+k|beta> * O * <beta|phi> and add to op_phi */
switch (pu_) {
case CPU: {
linalg<CPU>::gemm(0, 0, 2 * num_gkvec_loc, n__, nbeta, 1.0, reinterpret_cast<double*>(beta_gk.at<CPU>()),
2 * num_gkvec_loc, work_.at<CPU>(), nbeta, 1.0,
reinterpret_cast<double*>(op_phi__.pw_coeffs(jspn).prime().at<CPU>(0, idx0__)),
2 * op_phi__.pw_coeffs(jspn).prime().ld());
break;
}
case GPU: {
#ifdef __GPU
/* wait for previous zgemms */
#pragma omp parallel
acc::sync_stream(omp_get_thread_num());
linalg<GPU>::gemm(0, 0, 2 * num_gkvec_loc, n__, nbeta, &linalg_const<double>::one(),
reinterpret_cast<double*>(beta_gk.at<GPU>()), 2 * num_gkvec_loc, work_.at<GPU>(), nbeta,
&linalg_const<double>::one(),
reinterpret_cast<double*>(op_phi__.pw_coeffs(jspn).prime().at<GPU>(0, idx0__)),
2 * num_gkvec_loc);
acc::sync_stream(-1);
#endif
break;
}
}
}
template <typename T>
class D_operator : public Non_local_operator<T>
{
private:
void initialize()
{
auto& uc = this->ctx__.unit_cell();
for (int ia = 0; ia < uc.num_atoms(); ia++) {
int nbf = uc.atom(ia).mt_basis_size();
if (uc.atom(ia).type().spin_orbit_coupling()) {
// the pseudo potential contains information about
// spin orbit coupling so we use a different formula
// Eq.19 PRB 71 115106 for calculating the D matrix
// Note that the D matrices are stored and
// calculated in the up-down basis already not the
// (Veff,Bx,By,Bz) one.
for (int xi2 = 0; xi2 < nbf; xi2++) {
for (int xi1 = 0; xi1 < nbf; xi1++) {
int idx = xi2 * nbf + xi1;
for (int s = 0; s < 4; s++) {
this->op_(this->packed_mtrx_offset_(ia) + idx, s) =
type_wrapper<T>::bypass(uc.atom(ia).d_mtrx_so(xi1, xi2, s));
}
}
}
} else {
// No spin orbit coupling for this atom \f[D = D(V_{eff})
// I + D(B_x) \sigma_x + D(B_y) sigma_y + D(B_z)
// sigma_z\f] since the D matrices are calculated that
// way.
for (int xi2 = 0; xi2 < nbf; xi2++) {
for (int xi1 = 0; xi1 < nbf; xi1++) {
int idx = xi2 * nbf + xi1;
switch (this->ctx__.num_mag_dims()) {
case 3: {
double bx = uc.atom(ia).d_mtrx(xi1, xi2, 2);
double by = uc.atom(ia).d_mtrx(xi1, xi2, 3);
this->op_(this->packed_mtrx_offset_(ia) + idx, 2) =
type_wrapper<T>::bypass(double_complex(bx, -by));
this->op_(this->packed_mtrx_offset_(ia) + idx, 3) =
type_wrapper<T>::bypass(double_complex(bx, by));
}
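/* no break: in the non-collinear case we fall through to case 1 so that
   the spin-diagonal components (indices 0 and 1) are set as well */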
case 1: {
double v = uc.atom(ia).d_mtrx(xi1, xi2, 0);
double bz = uc.atom(ia).d_mtrx(xi1, xi2, 1);
this->op_(this->packed_mtrx_offset_(ia) + idx, 0) = v + bz;
this->op_(this->packed_mtrx_offset_(ia) + idx, 1) = v - bz;
break;
}
case 0: {
this->op_(this->packed_mtrx_offset_(ia) + idx, 0) = uc.atom(ia).d_mtrx(xi1, xi2, 0);
break;
}
default: {
TERMINATE("wrong number of magnetic dimensions");
}
}
}
}
}
}
if (this->ctx__.control().print_checksum_ && this->ctx__.comm().rank() == 0) {
auto cs = this->op_.checksum();
print_checksum("D_operator", cs);
}
if (this->pu_ == GPU) {
this->op_.allocate(memory_t::device);
this->op_.template copy<memory_t::host, memory_t::device>();
}
}
public:
D_operator(Simulation_context const& ctx_)
: Non_local_operator<T>(ctx_)
{
this->op_ = mdarray<T, 2>(this->packed_mtrx_size_, ctx_.num_mag_dims() + 1);
this->op_.zero();
/* D-matrix is complex in non-collinear case */
if (ctx_.num_mag_dims() == 3) {
assert((std::is_same<T, double_complex>::value));
}
initialize();
}
};
template <typename T>
class Q_operator : public Non_local_operator<T>
{
private:
void initialize()
{
auto& uc = this->ctx__.unit_cell();
for (int ia = 0; ia < uc.num_atoms(); ia++) {
int iat = uc.atom(ia).type().id();
if (!uc.atom_type(iat).augment()) {
continue;
}
int nbf = uc.atom(ia).mt_basis_size();
for (int xi2 = 0; xi2 < nbf; xi2++) {
for (int xi1 = 0; xi1 < nbf; xi1++) {
/* The ultra soft pseudo potential has spin orbit coupling incorporated to it, so we
need to rotate the Q matrix */
if (this->ctx__.unit_cell().atom_type(iat).spin_orbit_coupling()) {
/* this is nothing else than Eq.18 of Ref PRB 71, 115106 */
for (auto si = 0; si < 2; si++) {
for (auto sj = 0; sj < 2; sj++) {
double_complex result(0, 0);
for (int xi2p = 0; xi2p < nbf; xi2p++) {
if (uc.atom(ia).type().compare_index_beta_functions(xi2, xi2p)) {
for (int xi1p = 0; xi1p < nbf; xi1p++) {
/* The F coefficients are already "block diagonal" so we do a full
summation. We actually rotate the q_matrices only */
if (uc.atom(ia).type().compare_index_beta_functions(xi1, xi1p)) {
result += this->ctx__.augmentation_op(iat).q_mtrx(xi1p, xi2p) *
(uc.atom(ia).type().f_coefficients(xi1, xi1p, sj, 0) *
uc.atom(ia).type().f_coefficients(xi2p, xi2, 0, si) +
uc.atom(ia).type().f_coefficients(xi1, xi1p, sj, 1) *
uc.atom(ia).type().f_coefficients(xi2p, xi2, 1, si));
}
}
}
}
/* the order of the index is important */
const int ind = (si == sj) ? si : sj + 2;
/* this gives
ind = 0 if si = up and sj = up
ind = 1 if si = sj = down
ind = 2 if si = down and sj = up
ind = 3 if si = up and sj = down */
this->op_(this->packed_mtrx_offset_(ia) + xi2 * nbf + xi1, ind) =
type_wrapper<T>::bypass(result);
}
}
} else {
for (int ispn = 0; ispn < this->ctx__.num_spins(); ispn++) {
this->op_(this->packed_mtrx_offset_(ia) + xi2 * nbf + xi1, ispn) =
this->ctx__.augmentation_op(iat).q_mtrx(xi1, xi2);
}
}
}
}
}
if (this->ctx__.control().print_checksum_ && this->ctx__.comm().rank() == 0) {
auto cs = this->op_.checksum();
print_checksum("Q_operator", cs);
}
if (this->pu_ == GPU) {
this->op_.allocate(memory_t::device);
this->op_.template copy<memory_t::host, memory_t::device>();
}
}
public:
Q_operator(Simulation_context const& ctx_)
: Non_local_operator<T>(ctx_)
{
/* Q-operator is independent of spin if there is no spin-orbit; however, it simplifies the apply()
* method if the Q-operator has a spin index */
this->op_ = mdarray<T, 2>(this->packed_mtrx_size_, ctx_.num_mag_dims() + 1);
this->op_.zero();
initialize();
}
};
template <typename T>
class P_operator : public Non_local_operator<T>
{
public:
P_operator(Simulation_context const& ctx_, mdarray<double_complex, 3>& p_mtrx__)
: Non_local_operator<T>(ctx_)
{
/* P-operator is independent of spin */
this->op_ = mdarray<T, 2>(this->packed_mtrx_size_, 1);
this->op_.zero();
auto& uc = ctx_.unit_cell();
for (int ia = 0; ia < uc.num_atoms(); ia++) {
int iat = uc.atom(ia).type().id();
if (!uc.atom_type(iat).augment()) {
continue;
}
int nbf = uc.atom(ia).mt_basis_size();
for (int xi2 = 0; xi2 < nbf; xi2++) {
for (int xi1 = 0; xi1 < nbf; xi1++) {
this->op_(this->packed_mtrx_offset_(ia) + xi2 * nbf + xi1, 0) = -p_mtrx__(xi1, xi2, iat).real();
}
}
}
if (this->pu_ == GPU) {
this->op_.allocate(memory_t::device);
this->op_.template copy<memory_t::host, memory_t::device>();
}
}
};
}
#endif
|
dataset.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta (non-feature) data for training data,
* e.g. labels, weights, initial scores, query-level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
* The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query Weights, automatically calculated from weights and query_boundaries (if both exist).
* The weight of the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1]-1]) / (query_boundaries[i+1] - query_boundaries[i]),
* i.e. the mean record weight within that query. For example, with query_boundaries = {0, 3, 5},
* the weight of query 0 is (weights[0] + weights[1] + weights[2]) / 3.
* 5. Initial score, optional. If present, the model boosts from this score; otherwise it starts from 0.
*/
class Metadata {
public:
/*!
* \brief Null constructor
*/
Metadata();
/*!
* \brief Initialization will load query-level information, since it is needed for sampling data
* \param data_filename Filename of data
* \param initscore_file Filename of initial score
*/
void Init(const char* data_filename, const char* initscore_file);
/*!
* \brief init as subset
* \param metadata Full metadata to take the subset from
* \param used_indices Indices of the used records
* \param num_used_indices Number of used indices
*/
void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
/*!
* \brief Initialize from binary memory
* \param memory Pointer to memory
*/
void LoadFromMemory(const void* memory);
/*! \brief Destructor */
~Metadata();
/*!
* \brief Initial work; will allocate space for label, weight (if it exists) and query (if it exists)
* \param num_data Number of training data
* \param weight_idx Index of weight column, < 0 means it doesn't exist
* \param query_idx Index of query id column, < 0 means it doesn't exist
*/
void Init(data_size_t num_data, int weight_idx, int query_idx);
/*!
* \brief Partition label by used indices
* \param used_indices Indices of local used
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
/*!
* \brief Partition meta data according to local used indices if needed
* \param num_all_data Number of total training data, including other machines' data on parallel learning
* \param used_data_indices Indices of local used training data
*/
void CheckOrPartition(data_size_t num_all_data,
const std::vector<data_size_t>& used_data_indices);
void SetLabel(const label_t* label, data_size_t len);
void SetWeights(const label_t* weights, data_size_t len);
void SetQuery(const data_size_t* query, data_size_t len);
/*!
* \brief Set initial scores
* \param init_score Initial scores, this class will manage memory for init_score.
*/
void SetInitScore(const double* init_score, data_size_t len);
/*!
* \brief Save binary data to file
* \param writer File writer to write to
*/
void SaveBinaryToFile(const VirtualFileWriter* writer) const;
/*!
* \brief Get sizes in byte of this object
*/
size_t SizesInByte() const;
/*!
* \brief Get pointer of label
* \return Pointer of label
*/
inline const label_t* label() const { return label_.data(); }
/*!
* \brief Set label for one record
* \param idx Index of this record
* \param value Label value of this record
*/
inline void SetLabelAt(data_size_t idx, label_t value) {
label_[idx] = value;
}
/*!
* \brief Set Weight for one record
* \param idx Index of this record
* \param value Weight value of this record
*/
inline void SetWeightAt(data_size_t idx, label_t value) {
weights_[idx] = value;
}
/*!
* \brief Set Query Id for one record
* \param idx Index of this record
* \param value Query Id value of this record
*/
inline void SetQueryAt(data_size_t idx, data_size_t value) {
queries_[idx] = static_cast<data_size_t>(value);
}
/*!
* \brief Get weights, if not exists, will return nullptr
* \return Pointer of weights
*/
inline const label_t* weights() const {
if (!weights_.empty()) {
return weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get data boundaries on queries, if not exists, will return nullptr
* we assume data is ordered by query;
* the interval [query_boundaries[i], query_boundaries[i+1])
* gives the data indices for query i.
* \return Pointer of data boundaries on queries
*/
inline const data_size_t* query_boundaries() const {
if (!query_boundaries_.empty()) {
return query_boundaries_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get Number of queries
* \return Number of queries
*/
inline data_size_t num_queries() const { return num_queries_; }
/*!
* \brief Get weights for queries, if not exists, will return nullptr
* \return Pointer of weights for queries
*/
inline const label_t* query_weights() const {
if (!query_weights_.empty()) {
return query_weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get initial scores, if not exists, will return nullptr
* \return Pointer of initial scores
*/
inline const double* init_score() const {
if (!init_score_.empty()) {
return init_score_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get size of initial scores
*/
inline int64_t num_init_score() const { return num_init_score_; }
/*! \brief Disable copy */
Metadata& operator=(const Metadata&) = delete;
/*! \brief Disable copy */
Metadata(const Metadata&) = delete;
private:
/*! \brief Load initial scores from file */
void LoadInitialScore(const char* initscore_file);
/*! \brief Load weights from file */
void LoadWeights();
/*! \brief Load query boundaries from file */
void LoadQueryBoundaries();
/*! \brief Load query weights */
void LoadQueryWeights();
/*! \brief Filename of current data */
std::string data_filename_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of weights, used to validate the weight file */
data_size_t num_weights_;
/*! \brief Label data */
std::vector<label_t> label_;
/*! \brief Weights data */
std::vector<label_t> weights_;
/*! \brief Query boundaries */
std::vector<data_size_t> query_boundaries_;
/*! \brief Query weights */
std::vector<label_t> query_weights_;
/*! \brief Number of queries */
data_size_t num_queries_;
/*! \brief Number of initial scores, used to validate the initial-score file */
int64_t num_init_score_;
/*! \brief Initial score */
std::vector<double> init_score_;
/*! \brief Queries data */
std::vector<data_size_t> queries_;
/*! \brief mutex for thread-safe calls */
std::mutex mutex_;
bool weight_load_from_file_;
bool query_load_from_file_;
bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
/*! \brief virtual destructor */
virtual ~Parser() {}
/*!
* \brief Parse one line with label
* \param str One line record, string format, should end with '\0'
* \param out_features Output columns, store in (column_idx, values)
* \param out_label Label will store to this if exists
*/
virtual void ParseOneLine(const char* str,
std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
virtual int NumFeatures() const = 0;
/*!
* \brief Create an object of parser; will automatically choose the format depending on the file
* \param filename Filename of the data
* \param header True if the input file has a header line
* \param num_features Pass num_features of this data file if you know it, <= 0 means unknown
* \param label_idx Index of the label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
* which is used for training or validation
*/
class Dataset {
public:
friend DatasetLoader;
LIGHTGBM_EXPORT Dataset();
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
void Construct(
std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
int num_total_features,
const std::vector<std::vector<double>>& forced_bins,
int** sample_non_zero_indices,
const int* num_per_col,
int num_sample_col,
size_t total_sample_cnt,
const Config& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
}
inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
}
inline int RealFeatureIndex(int fidx) const {
return real_feature_idx_[fidx];
}
inline int InnerFeatureIndex(int col_idx) const {
return used_feature_map_[col_idx];
}
inline int Feature2Group(int feature_idx) const {
return feature2group_[feature_idx];
}
inline int Feture2SubFeature(int feature_idx) const {
return feature2subfeature_[feature_idx];
}
inline uint64_t GroupBinBoundary(int group_idx) const {
return group_bin_boundaries_[group_idx];
}
inline uint64_t NumTotalBin() const {
return group_bin_boundaries_.back();
}
inline std::vector<int> ValidFeatureIndices() const {
std::vector<int> ret;
for (int i = 0; i < num_total_features_; ++i) {
if (used_feature_map_[i] >= 0) {
ret.push_back(i);
}
}
return ret;
}
void ReSize(data_size_t num_data);
void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
LIGHTGBM_EXPORT void FinishLoad();
LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);
/*!
* \brief Save current dataset into binary file, will save to "filename.bin"
*/
LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
int leaf_idx,
std::vector<std::unique_ptr<OrderedBin>>* ordered_bins,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
bool is_constant_hessian,
HistogramBinEntry* histogram_data) const;
void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
HistogramBinEntry* data) const;
inline data_size_t Split(int feature,
const uint32_t* threshold, int num_threshold, bool default_left,
data_size_t* data_indices, data_size_t num_data,
data_size_t* lte_indices, data_size_t* gt_indices) const {
const int group = feature2group_[feature];
const int sub_feature = feature2subfeature_[feature];
return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
}
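// Illustration (hypothetical mapping): if feature 7 is stored in group 2 as
// its 0th sub-feature, Split(7, ...) forwards to
// feature_groups_[2]->Split(0, threshold, ...).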
inline int SubFeatureBinOffset(int i) const {
const int sub_feature = feature2subfeature_[i];
if (sub_feature == 0) {
return 1;
} else {
return 0;
}
}
inline int FeatureNumBin(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
}
inline int8_t FeatureMonotone(int i) const {
if (monotone_types_.empty()) {
return 0;
} else {
return monotone_types_[i];
}
}
inline double FeaturePenalte(int i) const {
if (feature_penalty_.empty()) {
return 1;
} else {
return feature_penalty_[i];
}
}
bool HasMonotone() const {
if (monotone_types_.empty()) {
return false;
} else {
for (size_t i = 0; i < monotone_types_.size(); ++i) {
if (monotone_types_[i] != 0) {
return true;
}
}
return false;
}
}
inline int FeatureGroupNumBin(int group) const {
return feature_groups_[group]->num_total_bin_;
}
inline const BinMapper* FeatureBinMapper(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature].get();
}
inline const Bin* FeatureBin(int i) const {
const int group = feature2group_[i];
return feature_groups_[group]->bin_data_.get();
}
inline const Bin* FeatureGroupBin(int group) const {
return feature_groups_[group]->bin_data_.get();
}
inline bool FeatureGroupIsSparse(int group) const {
return feature_groups_[group]->is_sparse_;
}
inline BinIterator* FeatureIterator(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->SubFeatureIterator(sub_feature);
}
inline BinIterator* FeatureGroupIterator(int group) const {
return feature_groups_[group]->FeatureGroupIterator();
}
inline double RealThreshold(int i, uint32_t threshold) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
}
// given a real threshold, find the closest threshold bin
inline uint32_t BinThreshold(int i, double threshold_double) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
}
inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
ordered_bins->resize(num_groups_);
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < num_groups_; ++i) {
OMP_LOOP_EX_BEGIN();
ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
}
/*!
* \brief Get meta data pointer
* \return Pointer of meta data
*/
inline const Metadata& metadata() const { return metadata_; }
/*! \brief Get Number of used features */
inline int num_features() const { return num_features_; }
/*! \brief Get Number of feature groups */
inline int num_feature_groups() const { return num_groups_;}
/*! \brief Get Number of total features */
inline int num_total_features() const { return num_total_features_; }
/*! \brief Get the index of label column */
inline int label_idx() const { return label_idx_; }
/*! \brief Get names of current data set */
inline const std::vector<std::string>& feature_names() const { return feature_names_; }
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name : feature_names_) {
// check ascii
if (!Common::CheckASCII(feature_name)) {
Log::Fatal("Do not support non-ASCII characters in feature name.");
}
// check json
if (!Common::CheckAllowedJSON(feature_name)) {
Log::Fatal("Do not support special JSON characters in feature name.");
}
if (feature_name.find(' ') != std::string::npos) {
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
}
if (spaceInFeatureName) {
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; i++) {
int fidx = used_feature_map_[i];
if (fidx == -1) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info());
}
}
return bufs;
}
void ResetConfig(const char* parameters);
/*! \brief Get Number of data */
inline data_size_t num_data() const { return num_data_; }
/*! \brief Disable copy */
Dataset& operator=(const Dataset&) = delete;
/*! \brief Disable copy */
Dataset(const Dataset&) = delete;
void addFeaturesFrom(Dataset* other);
private:
std::string data_filename_;
/*! \brief Store used features */
std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
/*! \brief Mapper from real feature index to used index*/
std::vector<int> used_feature_map_;
/*! \brief Number of used features*/
int num_features_;
/*! \brief Number of total features*/
int num_total_features_;
/*! \brief Number of total data*/
data_size_t num_data_;
/*! \brief Store some label level data*/
Metadata metadata_;
/*! \brief index of label column */
int label_idx_ = 0;
/*! \brief Threshold for treating a feature as a sparse feature */
double sparse_threshold_;
/*! \brief store feature names */
std::vector<std::string> feature_names_;
/*! \brief token used to identify the binary data file */
static const char* binary_file_token;
int num_groups_;
std::vector<int> real_feature_idx_;
std::vector<int> feature2group_;
std::vector<int> feature2subfeature_;
std::vector<uint64_t> group_bin_boundaries_;
std::vector<int> group_feature_start_;
std::vector<int> group_feature_cnt_;
std::vector<int8_t> monotone_types_;
std::vector<double> feature_penalty_;
bool is_finish_load_;
int max_bin_;
std::vector<int32_t> max_bin_by_feature_;
std::vector<std::vector<double>> forced_bin_bounds_;
int bin_construct_sample_cnt_;
int min_data_in_bin_;
bool use_missing_;
bool zero_as_missing_;
};
} // namespace LightGBM
#endif // LIGHTGBM_DATASET_H_
|
GB_convert_bitmap_worker.c | //------------------------------------------------------------------------------
// GB_convert_bitmap_worker: construct triplets or CSC/CSR from bitmap
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If A is iso and Ax_new is not NULL, the iso scalar is expanded into the
// non-iso array Ax_new. Otherwise, if Ax_new and Ax are NULL then no values
// are extracted.
// TODO allow this function to do typecasting. Create 169 different versions,
// one for each of the 13x13 type combinations. Use this as part of Method 24, C=A assignment.
// Can also use typecasting for GB_Matrix_diag.
#include "GB.h"
#include "GB_partition.h"
GrB_Info GB_convert_bitmap_worker // extract CSC/CSR or triplets from bitmap
(
// outputs:
int64_t *restrict Ap, // vector pointers for CSC/CSR form
int64_t *restrict Ai, // indices for CSC/CSR or triplet form
int64_t *restrict Aj, // vector indices for triplet form
GB_void *restrict Ax_new, // values for CSC/CSR or triplet form
int64_t *anvec_nonempty, // # of non-empty vectors
// inputs: not modified
const GrB_Matrix A, // matrix to extract; not modified
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (GB_IS_BITMAP (A)) ;
ASSERT (Ap != NULL) ; // must be provided on input, size avdim+1
int64_t *restrict W = NULL ; size_t W_size = 0 ;
const int64_t avdim = A->vdim ;
const int64_t avlen = A->vlen ;
const size_t asize = A->type->size ;
//--------------------------------------------------------------------------
// count the entries in each vector
//--------------------------------------------------------------------------
const int8_t *restrict Ab = A->b ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (avlen*avdim, chunk, nthreads_max) ;
bool by_vector = (nthreads <= avdim) ;
if (by_vector)
{
//----------------------------------------------------------------------
// compute all vectors in parallel (no workspace)
//----------------------------------------------------------------------
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < avdim ; j++)
{
// ajnz = nnz (A (:,j))
int64_t ajnz = 0 ;
int64_t pA_start = j * avlen ;
for (int64_t i = 0 ; i < avlen ; i++)
{
// see if A(i,j) is present in the bitmap
int64_t p = i + pA_start ;
ajnz += Ab [p] ;
ASSERT (Ab [p] == 0 || Ab [p] == 1) ;
}
Ap [j] = ajnz ;
}
}
else
{
//----------------------------------------------------------------------
// compute blocks of rows in parallel
//----------------------------------------------------------------------
// allocate one row of W per thread, each row of length avdim
W = GB_MALLOC_WERK (nthreads * avdim, int64_t, &W_size) ;
if (W == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (taskid = 0 ; taskid < nthreads ; taskid++)
{
int64_t *restrict Wtask = W + taskid * avdim ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, avlen, taskid, nthreads) ;
for (int64_t j = 0 ; j < avdim ; j++)
{
// ajnz = nnz (A (istart:iend-1,j))
int64_t ajnz = 0 ;
int64_t pA_start = j * avlen ;
for (int64_t i = istart ; i < iend ; i++)
{
// see if A(i,j) is present in the bitmap
int64_t p = i + pA_start ;
ajnz += Ab [p] ;
ASSERT (Ab [p] == 0 || Ab [p] == 1) ;
}
Wtask [j] = ajnz ;
}
}
// cumulative sum to compute nnz(A(:,j)) for each vector j
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < avdim ; j++)
{
int64_t ajnz = 0 ;
for (int taskid = 0 ; taskid < nthreads ; taskid++)
{
int64_t *restrict Wtask = W + taskid * avdim ;
int64_t c = Wtask [j] ;
Wtask [j] = ajnz ;
ajnz += c ;
}
Ap [j] = ajnz ;
}
}
//--------------------------------------------------------------------------
// cumulative sum of Ap
//--------------------------------------------------------------------------
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
GB_cumsum (Ap, avdim, anvec_nonempty, nth, Context) ;
int64_t anz = Ap [avdim] ;
ASSERT (anz == A->nvals) ;
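// Worked example (illustrative): for a 3-by-2 bitmap stored by column with
// Ab = {1,0,1, 1,1,0} (avlen = 3, avdim = 2), the per-vector counts are
// Ap = {2,2}; after the cumulative sum, Ap = {0,2,4} and anz = 4.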
//--------------------------------------------------------------------------
// gather the pattern and values from the bitmap
//--------------------------------------------------------------------------
// TODO: add type-specific versions for built-in types
const GB_void *restrict Ax = (GB_void *) (A->x) ;
const bool A_iso = A->iso ;
const bool numeric = (Ax_new != NULL && Ax != NULL) ;
if (by_vector)
{
//----------------------------------------------------------------------
// construct all vectors in parallel (no workspace)
//----------------------------------------------------------------------
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < avdim ; j++)
{
// gather from the bitmap into the new A (:,j)
int64_t pnew = Ap [j] ;
int64_t pA_start = j * avlen ;
for (int64_t i = 0 ; i < avlen ; i++)
{
int64_t p = i + pA_start ;
if (Ab [p])
{
// A(i,j) is in the bitmap
if (Ai != NULL) Ai [pnew] = i ;
if (Aj != NULL) Aj [pnew] = j ;
if (numeric)
{
// Ax_new [pnew] = Ax [p])
memcpy (Ax_new +(pnew)*asize,
Ax +(A_iso ? 0:(p)*asize), asize) ;
}
pnew++ ;
}
}
ASSERT (pnew == Ap [j+1]) ;
}
}
else
{
//----------------------------------------------------------------------
// compute blocks of rows in parallel
//----------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (taskid = 0 ; taskid < nthreads ; taskid++)
{
int64_t *restrict Wtask = W + taskid * avdim ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, avlen, taskid, nthreads) ;
for (int64_t j = 0 ; j < avdim ; j++)
{
// gather from the bitmap into the new A (:,j)
int64_t pnew = Ap [j] + Wtask [j] ;
int64_t pA_start = j * avlen ;
for (int64_t i = istart ; i < iend ; i++)
{
// see if A(i,j) is present in the bitmap
int64_t p = i + pA_start ;
if (Ab [p])
{
// A(i,j) is in the bitmap
if (Ai != NULL) Ai [pnew] = i ;
if (Aj != NULL) Aj [pnew] = j ;
if (numeric)
{
// Ax_new [pnew] = Ax [p] ;
memcpy (Ax_new +(pnew)*asize,
Ax +(A_iso ? 0:(p)*asize), asize) ;
}
pnew++ ;
}
}
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WERK (&W, W_size) ;
return (GrB_SUCCESS) ;
}
|
GB_unop__identity_fc64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_uint16)
// op(A') function: GB (_unop_tran__identity_fc64_uint16)
// C type: GxB_FC64_t
// A type: uint16_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc64_uint16)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc64_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_uint32
// op(A') function: GB_tran__lnot_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint64_uint32
(
uint64_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
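// Editor's note: for reference, one iteration GB_CAST_OP (p, p) of the apply
// loop above expands, via the macros defined in this file, to:
//
//      uint32_t aij = Ax [p] ;             // GB_GETA
//      uint64_t x = (uint64_t) aij ;       // GB_CASTING
//      Cx [p] = !(x != 0) ;                // GB_OP into GB_CX
//
// i.e. Cx [p] is 1 exactly when Ax [p] is zero.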
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zkbdf_eval.c | /*
Name: zkbdf_eval.c
Author: Tan Teik Guan
Description: Eval function for VDF realization using ZKBoo. Modified from MPC_SHA256.c
*/
/*
============================================================================
Name : MPC_SHA256.c
Author : Sobuno
Version : 0.1
Description : MPC SHA256 for one block only
============================================================================
*/
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "shared.h"
#include <math.h>
#include "omp.h"
#define CH(e,f,g) ((e & f) ^ ((~e) & g))
int totalRandom = 0;
int totalSha = 0;
int totalSS = 0;
int totalHash = 0;
int NUM_ROUNDS = 100;
uint32_t rand32() {
uint32_t x;
x = rand() & 0xff;
x |= (rand() & 0xff) << 8;
x |= (rand() & 0xff) << 16;
x |= (rand() & 0xff) << 24;
return x;
}
void printbits(uint32_t n) {
if (n) {
printbits(n >> 1);
printf("%d", n & 1);
}
}
void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) {
z[0] = x[0] ^ y[0];
z[1] = x[1] ^ y[1];
z[2] = x[2] ^ y[2];
}
void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)};
*randCount += 4;
uint32_t t[3] = { 0 };
t[0] = (x[0] & y[1]) ^ (x[1] & y[0]) ^ (x[0] & y[0]) ^ r[0] ^ r[1];
t[1] = (x[1] & y[2]) ^ (x[2] & y[1]) ^ (x[1] & y[1]) ^ r[1] ^ r[2];
t[2] = (x[2] & y[0]) ^ (x[0] & y[2]) ^ (x[2] & y[2]) ^ r[2] ^ r[0];
z[0] = t[0];
z[1] = t[1];
z[2] = t[2];
views[0].y[*countY] = z[0];
views[1].y[*countY] = z[1];
views[2].y[*countY] = z[2];
(*countY)++;
}
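/* Editor's sketch (hypothetical helper, not in the original file): the AND
gate above is correct because XOR-ing the three output shares cancels every
blinding term r[i] (each appears exactly twice) and leaves every product
term x[i] & y[j] exactly once, i.e.
z[0]^z[1]^z[2] == (x[0]^x[1]^x[2]) & (y[0]^y[1]^y[2]).
A plain-C check of that identity on random shares: */
static int check_and_gate_identity(void) {
uint32_t x[3], y[3], r[3], z[3];
for (int i = 0; i < 3; i++) {
x[i] = rand32(); y[i] = rand32(); r[i] = rand32();
}
for (int i = 0; i < 3; i++) {
int j = (i + 1) % 3; // mirrors the t[0..2] formulas above
z[i] = (x[i] & y[j]) ^ (x[j] & y[i]) ^ (x[i] & y[i]) ^ r[i] ^ r[j];
}
uint32_t xs = x[0]^x[1]^x[2], ys = y[0]^y[1]^y[2];
return (z[0]^z[1]^z[2]) == (xs & ys); // expected: 1
}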
void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) {
z[0] = ~x[0];
z[1] = ~x[1];
z[2] = ~x[2];
}
void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
uint32_t c[3] = { 0 };
uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)};
*randCount += 4;
uint8_t a[3], b[3];
uint8_t t;
for(int i=0;i<31;i++)
{
a[0]=GETBIT(x[0]^c[0],i);
a[1]=GETBIT(x[1]^c[1],i);
a[2]=GETBIT(x[2]^c[2],i);
b[0]=GETBIT(y[0]^c[0],i);
b[1]=GETBIT(y[1]^c[1],i);
b[2]=GETBIT(y[2]^c[2],i);
t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i);
SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i));
t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i);
SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ GETBIT(c[1],i) ^ GETBIT(r[1],i));
t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i);
SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i));
}
z[0]=x[0]^y[0]^c[0];
z[1]=x[1]^y[1]^c[1];
z[2]=x[2]^y[2]^c[2];
views[0].y[*countY] = c[0];
views[1].y[*countY] = c[1];
views[2].y[*countY] = c[2];
*countY += 1;
}
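/* Editor's sketch: mpc_ADD above is a ripple-carry adder evaluated on XOR
shares: with a = x ^ c and b = y ^ c folded in, each step computes the
majority-function carry c[i+1] = (x&y) ^ (c&(x^y)) at bit i, and the sum is
x ^ y ^ c. A plain (non-shared) reference of the same recurrence: */
static uint32_t ripple_add(uint32_t x, uint32_t y) {
uint32_t c = 0; // bit i of c = carry into bit i
for (int i = 0; i < 31; i++) {
uint32_t a = (x >> i) & 1, b = (y >> i) & 1, ci = (c >> i) & 1;
uint32_t co = (a & b) ^ (ci & (a ^ b)); // majority(a, b, ci)
c |= co << (i + 1);
}
return x ^ y ^ c; // equals x + y (mod 2^32); carry out of bit 31 dropped
}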
void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
uint32_t c[3] = { 0 };
uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)};
*randCount += 4;
uint8_t a[3], b[3];
uint8_t t;
for(int i=0;i<31;i++)
{
a[0]=GETBIT(x[0]^c[0],i);
a[1]=GETBIT(x[1]^c[1],i);
a[2]=GETBIT(x[2]^c[2],i);
b[0]=GETBIT(y^c[0],i);
b[1]=GETBIT(y^c[1],i);
b[2]=GETBIT(y^c[2],i);
t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i);
SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i));
t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i);
SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ GETBIT(c[1],i) ^ GETBIT(r[1],i));
t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i);
SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i));
}
z[0]=x[0]^y^c[0];
z[1]=x[1]^y^c[1];
z[2]=x[2]^y^c[2];
views[0].y[*countY] = c[0];
views[1].y[*countY] = c[1];
views[2].y[*countY] = c[2];
*countY += 1;
}
int sha256(unsigned char* result, unsigned char* input, int numBits) {
uint32_t hA[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
int remainingBits = numBits;
int chars;
int i;
while (remainingBits >= 0)
{
if (remainingBits > 447)
{
chars = 64;
remainingBits -= 512;
}
else
{
chars = remainingBits >> 3;
remainingBits = -1;
}
unsigned char* chunk = calloc(64, 1); //512 bits
memcpy(chunk, input, chars);
input += chars;
if (chars < 64)
{
chunk[chars] = 0x80;
chunk[60] = numBits >> 24;
chunk[61] = numBits >> 16;
chunk[62] = numBits >> 8;
chunk[63] = numBits;
}
uint32_t w[64];
for (i = 0; i < 16; i++) {
w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16)
| (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3];
}
uint32_t s0, s1;
for (i = 16; i < 64; i++) {
s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18)
^ (w[i - 15] >> 3);
s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19)
^ (w[i - 2] >> 10);
w[i] = w[i - 16] + s0 + w[i - 7] + s1;
}
uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj;
a = hA[0];
b = hA[1];
c = hA[2];
d = hA[3];
e = hA[4];
f = hA[5];
g = hA[6];
h = hA[7];
for (i = 0; i < 64; i++) {
s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25);
temp1 = h + s1 + CH(e, f, g) + k[i] + w[i];
s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22);
maj = (a & (b ^ c)) ^ (b & c);
temp2 = s0 + maj;
h = g;
g = f;
f = e;
e = d + temp1;
d = c;
c = b;
b = a;
a = temp1 + temp2;
}
hA[0] += a;
hA[1] += b;
hA[2] += c;
hA[3] += d;
hA[4] += e;
hA[5] += f;
hA[6] += g;
hA[7] += h;
free(chunk); // chunk is allocated per 512-bit block; free it to avoid leaking
}
for (i = 0; i < 8; i++) {
result[i * 4] = (hA[i] >> 24);
result[i * 4 + 1] = (hA[i] >> 16);
result[i * 4 + 2] = (hA[i] >> 8);
result[i * 4 + 3] = hA[i];
}
return 0;
}
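/* Editor's note: the helper above implements FIPS 180-4 SHA-256 for inputs
whose bit length fits in the four length bytes it writes (numBits < 2^32).
Typical use, against the standard "abc" test vector:
unsigned char digest[32];
sha256(digest, (unsigned char *) "abc", 3 * 8);
// digest == ba7816bf 8f01cfea 414140de 5dae2223
//           b00361a3 96177a9c b410ff61 f20015ad
*/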
void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) {
z[0] = RIGHTROTATE(x[0], i);
z[1] = RIGHTROTATE(x[1], i);
z[2] = RIGHTROTATE(x[2], i);
}
void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) {
z[0] = x[0] >> i;
z[1] = x[1] >> i;
z[2] = x[2] >> i;
}
void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
uint32_t t0[3];
uint32_t t1[3];
mpc_XOR(a, b, t0);
mpc_XOR(a, c, t1);
mpc_AND(t0, t1, z, randomness, randCount, views, countY);
mpc_XOR(z, a, z);
}
void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
uint32_t t0[3];
//e & (f^g) ^ g
mpc_XOR(f,g,t0);
mpc_AND(e,t0,t0, randomness, randCount, views, countY);
mpc_XOR(t0,g,z);
}
int mpc_sha256(unsigned char* results[3], unsigned char inputs[3][BLOCK_SIZE], int numBits, int addView, uint32_t hA[8][3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
/*
if (numBits > 447) {
printf("Input too long, aborting!");
return -1;
}
*/
int chars = numBits >> 3;
unsigned char* chunks[3];
uint32_t w[64][3];
uint32_t msg[MSG_SIZE/4];
/*
if (addMsg)
{
for (int j=0;j<(numBits/32);j++)
{
msg[j] = (addMsg[j*4]<<24) | (addMsg[j*4+1]<<16) | (addMsg[j*4+2] << 8) | (addMsg[j*4+3]);
}
}
*/
for (int i =0; i<64;i++)
{
w[i][0]=w[i][1]=w[i][2] = 0;
}
for (int i = 0; i < 3; i++) {
chunks[i] = calloc(64, 1); //512 bits
memcpy(chunks[i], inputs[i], BLOCK_SIZE /*chars*/);
/*
chunks[i][chars] = 0x80;
//Last 8 chars used for storing length of input without padding, in big-endian.
//Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest
chunk[60] = numBits >> 24;
chunk[61] = numBits >> 16;
chunks[i][62] = numBits >> 8;
chunks[i][63] = numBits;
*/
if (addView)
memcpy(views[i].x, chunks[i], 64);
for (int j = 0; j < 16; j++) {
w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16)
| (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3];
}
free(chunks[i]);
}
uint32_t s0[3], s1[3];
uint32_t t0[3], t1[3];
for (int j = 16; j < 64; j++) {
//s0[i] = RIGHTROTATE(w[i][j-15],7) ^ RIGHTROTATE(w[i][j-15],18) ^ (w[i][j-15] >> 3);
mpc_RIGHTROTATE(w[j-15], 7, t0);
mpc_RIGHTROTATE(w[j-15], 18, t1);
mpc_XOR(t0, t1, t0);
mpc_RIGHTSHIFT(w[j-15], 3, t1);
mpc_XOR(t0, t1, s0);
//s1[i] = RIGHTROTATE(w[i][j-2],17) ^ RIGHTROTATE(w[i][j-2],19) ^ (w[i][j-2] >> 10);
mpc_RIGHTROTATE(w[j-2], 17, t0);
mpc_RIGHTROTATE(w[j-2], 19, t1);
mpc_XOR(t0, t1, t0);
mpc_RIGHTSHIFT(w[j-2], 10, t1);
mpc_XOR(t0, t1, s1);
//w[i][j] = w[i][j-16]+s0[i]+w[i][j-7]+s1[i];
mpc_ADD(w[j-16], s0, t1, randomness, randCount, views, countY);
mpc_ADD(w[j-7], t1, t1, randomness, randCount, views, countY);
mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY);
}
uint32_t a[3] = { hA[0][0],hA[0][1],hA[0][2] };
uint32_t b[3] = { hA[1][0],hA[1][1],hA[1][2] };
uint32_t c[3] = { hA[2][0],hA[2][1],hA[2][2] };
uint32_t d[3] = { hA[3][0],hA[3][1],hA[3][2] };
uint32_t e[3] = { hA[4][0],hA[4][1],hA[4][2] };
uint32_t f[3] = { hA[5][0],hA[5][1],hA[5][2] };
uint32_t g[3] = { hA[6][0],hA[6][1],hA[6][2] };
uint32_t h[3] = { hA[7][0],hA[7][1],hA[7][2] };
uint32_t temp1[3], temp2[3], maj[3];
for (int i = 0; i < 64; i++) {
//s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e,11) ^ RIGHTROTATE(e,25);
mpc_RIGHTROTATE(e, 6, t0);
mpc_RIGHTROTATE(e, 11, t1);
mpc_XOR(t0, t1, t0);
mpc_RIGHTROTATE(e, 25, t1);
mpc_XOR(t0, t1, s1);
//ch = (e & f) ^ ((~e) & g);
//temp1 = h + s1 + CH(e,f,g) + k[i]+w[i];
//t0 = h + s1
mpc_ADD(h, s1, t0, randomness, randCount, views, countY);
mpc_CH(e, f, g, t1, randomness, randCount, views, countY);
//t1 = t0 + t1 (h+s1+ch)
mpc_ADD(t0, t1, t1, randomness, randCount, views, countY);
mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY);
mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY);
//s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a,13) ^ RIGHTROTATE(a,22);
mpc_RIGHTROTATE(a, 2, t0);
mpc_RIGHTROTATE(a, 13, t1);
mpc_XOR(t0, t1, t0);
mpc_RIGHTROTATE(a, 22, t1);
mpc_XOR(t0, t1, s0);
mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY);
//temp2 = s0+maj;
mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY);
memcpy(h, g, sizeof(uint32_t) * 3);
memcpy(g, f, sizeof(uint32_t) * 3);
memcpy(f, e, sizeof(uint32_t) * 3);
//e = d+temp1;
mpc_ADD(d, temp1, e, randomness, randCount, views, countY);
memcpy(d, c, sizeof(uint32_t) * 3);
memcpy(c, b, sizeof(uint32_t) * 3);
memcpy(b, a, sizeof(uint32_t) * 3);
//a = temp1+temp2;
mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY);
}
/*
uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0] }, { hA[1],hA[1],hA[1] }, { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] },
{ hA[4],hA[4],hA[4] }, { hA[5],hA[5],hA[5] }, { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } };
*/
uint32_t hHa[8][3];
mpc_ADD(hA[0], a, hHa[0], randomness, randCount, views, countY);
mpc_ADD(hA[1], b, hHa[1], randomness, randCount, views, countY);
mpc_ADD(hA[2], c, hHa[2], randomness, randCount, views, countY);
mpc_ADD(hA[3], d, hHa[3], randomness, randCount, views, countY);
mpc_ADD(hA[4], e, hHa[4], randomness, randCount, views, countY);
mpc_ADD(hA[5], f, hHa[5], randomness, randCount, views, countY);
mpc_ADD(hA[6], g, hHa[6], randomness, randCount, views, countY);
mpc_ADD(hA[7], h, hHa[7], randomness, randCount, views, countY);
for (int i = 0; i < 8; i++)
{
hA[i][0] = hHa[i][0];
hA[i][1] = hHa[i][1];
hA[i][2] = hHa[i][2];
}
for (int i = 0; i < 8; i++) {
mpc_RIGHTSHIFT(hHa[i], 24, t0);
results[0][i * 4] = t0[0];
results[1][i * 4] = t0[1];
results[2][i * 4] = t0[2];
mpc_RIGHTSHIFT(hHa[i], 16, t0);
results[0][i * 4 + 1] = t0[0];
results[1][i * 4 + 1] = t0[1];
results[2][i * 4 + 1] = t0[2];
mpc_RIGHTSHIFT(hHa[i], 8, t0);
results[0][i * 4 + 2] = t0[0];
results[1][i * 4 + 2] = t0[1];
results[2][i * 4 + 2] = t0[2];
results[0][i * 4 + 3] = hHa[i][0];
results[1][i * 4 + 3] = hHa[i][1];
results[2][i * 4 + 3] = hHa[i][2];
}
return 0;
}
int writeToFile(char filename[], void* data, int size, int numItems) {
FILE *file;
file = fopen(filename, "wb");
if (!file) {
printf("Unable to open file!");
return 1;
}
fwrite(data, size, numItems, file);
fclose(file);
return 0;
}
int secretShare(unsigned char* input, int numBytes, unsigned char output[3][numBytes]) {
if(RAND_bytes(output[0], numBytes) != 1) {
printf("RAND_bytes failed crypto, aborting\n");
}
if(RAND_bytes(output[1], numBytes) != 1) {
printf("RAND_bytes failed crypto, aborting\n");
}
for (int j = 0; j < numBytes; j++) {
output[2][j] = input[j] ^ output[0][j] ^ output[1][j];
}
return 0;
}
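/* Editor's note: this is plain XOR secret sharing - any two of the three
shares are uniformly random and reveal nothing; XOR-ing all three
reconstructs the input. Hypothetical round trip:
unsigned char msg[16] = "attack at dawn!", sh[3][16];
secretShare(msg, 16, sh);
for (int j = 0; j < 16; j++)
assert(msg[j] == (sh[0][j] ^ sh[1][j] ^ sh[2][j])); // needs <assert.h>
*/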
int mpc_hmac_sha256(unsigned char* results[3], unsigned char ek[3][BLOCK_SIZE], int numBytes, char * Cha, unsigned char *randomness[3], int* randCount, View views[3], int* countY)
{
unsigned char shares[3][BLOCK_SIZE];
uint32_t hA[8][3];
int i;
unsigned char* innerhash[3],*outerhash[3];
if (strlen(Cha) > MSG_SIZE)
{
printf("Input too long, aborting!");
return -1; // check length before allocating, so this error path cannot leak
}
innerhash[0] = malloc(32);
innerhash[1] = malloc(32);
innerhash[2] = malloc(32);
outerhash[0] = malloc(32);
outerhash[1] = malloc(32);
outerhash[2] = malloc(32);
for (i=0;i<8;i++)
hA[i][0] = hA[i][1] = hA[i][2] = ihA[i];
memset(shares[0],0,sizeof(shares[0]));
memset(shares[1],0,sizeof(shares[1]));
memset(shares[2],0,sizeof(shares[2]));
for (i = 0; i < BLOCK_SIZE;i++)
{
shares[0][i] = ek[0][i] ^ 0x36;
shares[1][i] = ek[1][i] ^ 0x36;
shares[2][i] = ek[2][i] ^ 0x36;
}
mpc_sha256(innerhash, shares, 512, 0, hA, randomness, randCount, views, countY);
memset(shares[0],0,sizeof(shares[0]));
memset(shares[1],0,sizeof(shares[1]));
memset(shares[2],0,sizeof(shares[2]));
for (i = 0; i < strlen(Cha);i++)
{
shares[0][i] = Cha[i];
shares[1][i] = Cha[i];
shares[2][i] = Cha[i];
}
shares[0][strlen(Cha)] = shares[1][strlen(Cha)] = shares[2][strlen(Cha)] = 0x80;
shares[0][61] = shares[1][61] = shares[2][61] = (((strlen(Cha)* 8)+512) >> 16) & 0xFF;
shares[0][62] = shares[1][62] = shares[2][62] = (((strlen(Cha)* 8)+512) >> 8) & 0xFF;
shares[0][63] = shares[1][63] = shares[2][63] = ((strlen(Cha)* 8)+512) & 0xFF;
mpc_sha256(innerhash, shares, 512, 0, hA, randomness, randCount, views, countY);
for (i=0;i<8;i++)
hA[i][0] = hA[i][1] = hA[i][2] = ihA[i];
memset(shares,0,3*BLOCK_SIZE);
for (i = 0; i < BLOCK_SIZE;i++)
{
shares[0][i] = ek[0][i] ^ 0x5c;
shares[1][i] = ek[1][i] ^ 0x5c;
shares[2][i] = ek[2][i] ^ 0x5c;
}
mpc_sha256(outerhash, shares, 512, 0, hA, randomness, randCount, views, countY);
memset(shares,0,3*BLOCK_SIZE);
for (i = 0; i < 32;i++)
{
shares[0][i] = innerhash[0][i];
shares[1][i] = innerhash[1][i];
shares[2][i] = innerhash[2][i];
}
shares[0][32] = shares[1][32] = shares[2][32] = 0x80;
shares[0][62] = shares[1][62] = shares[2][62] = 3;
mpc_sha256(results, shares, 512, 0, hA, randomness, randCount, views, countY);
free(innerhash[0]);
free(innerhash[1]);
free(innerhash[2]);
free(outerhash[0]);
free(outerhash[1]);
free(outerhash[2]);
return 0;
}
a commit(int numBytes, unsigned char shares[3][BLOCK_SIZE], char * Cha, unsigned char *randomness[3], unsigned char rs[3][4], View views[3]) {
unsigned char* hashes[3];
hashes[0] = malloc(32);
hashes[1] = malloc(32);
hashes[2] = malloc(32);
int* randCount = calloc(1, sizeof(int));
int* countY = calloc(1, sizeof(int));
uint32_t hA[8][3];
int i;
for (i=0;i<8;i++)
hA[i][0] = hA[i][1] = hA[i][2] = ihA[i];
*countY = 0;
shares[0][numBytes] = shares[1][numBytes] = shares[2][numBytes] = 0x80;
shares[0][62] = shares[1][62] = shares[2][62] = ((numBytes * 8) >> 8) & 0xFF;
shares[0][63] = shares[1][63] = shares[2][63] = (numBytes * 8) & 0xFF;
mpc_sha256(hashes, shares, numBytes * 8, 1, hA, randomness, randCount, views, countY);
unsigned char * hmac[3];
hmac[0] = malloc(32);
hmac[1] = malloc(32);
hmac[2] = malloc(32);
shares[0][numBytes] = shares[1][numBytes] = shares[2][numBytes] = shares[0][62] = shares[1][62] = shares[2][62] = shares[0][63] = shares[1][63] = shares[2][63] = 0;
mpc_hmac_sha256(hmac, shares, numBytes, Cha, randomness, randCount, views, countY);
//Explicitly add y to view
free(randCount);
for(int i = 0; i<8; i++) {
views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16)
| (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3];
views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16)
| (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3];
views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16)
| (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3];
*countY += 1;
}
for(int i = 0; i<8; i++) {
views[0].y[*countY] = (hmac[0][i * 4] << 24) | (hmac[0][i * 4 + 1] << 16)
| (hmac[0][i * 4 + 2] << 8) | hmac[0][i * 4 + 3];
views[1].y[*countY] = (hmac[1][i * 4] << 24) | (hmac[1][i * 4 + 1] << 16)
| (hmac[1][i * 4 + 2] << 8) | hmac[1][i * 4 + 3];
views[2].y[*countY] = (hmac[2][i * 4] << 24) | (hmac[2][i * 4 + 1] << 16)
| (hmac[2][i * 4 + 2] << 8) | hmac[2][i * 4 + 3];
*countY += 1;
}
free(countY);
free(hashes[0]);
free(hashes[1]);
free(hashes[2]);
free(hmac[0]);
free(hmac[1]);
free(hmac[2]);
uint32_t* result11 = malloc(32);
uint32_t* result21 = malloc(32);
output(views[0], result11,result21);
uint32_t* result12 = malloc(32);
uint32_t* result22 = malloc(32);
output(views[1], result12, result22);
uint32_t* result13 = malloc(32);
uint32_t* result23 = malloc(32);
output(views[2], result13,result23);
a a;
memcpy(a.yp1[0], result11, 32);
memcpy(a.yp1[1], result12, 32);
memcpy(a.yp1[2], result13, 32);
memcpy(a.yp2[0], result21, 32);
memcpy(a.yp2[1], result22, 32);
memcpy(a.yp2[2], result23, 32);
free(result11);
free(result12);
free(result13);
free(result21);
free(result22);
free(result23);
return a;
}
z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], View views[3]) {
z z;
memcpy(z.ke, keys[e], 16);
memcpy(z.ke1, keys[(e + 1) % 3], 16);
z.ve = views[e];
z.ve1 = views[(e + 1) % 3];
memcpy(z.re, rs[e],4);
memcpy(z.re1, rs[(e + 1) % 3],4);
return z;
}
int GetNextSelected(int size,unsigned char * data, int *dataPtr)
{
int value=0;
int modulo = size;
while (size > 0)
{
value <<=8;
value += (int) data[*dataPtr];
size >>=8;
(*dataPtr)++;
}
if (!(value & 0x01)) // force value odd: an odd value mod the (even) NUM_ROUNDS stays odd, never 0
value++;
return (int) value % modulo;
}
Merkle * BuildMerkleTree(int NumRounds,z * zs)
{
int i;
Merkle * tempNode;
Merkle * startNode = NULL;
Merkle * childNode;
Merkle * prevNode;
int done = 0;
int odd = 0;
unsigned char datablock[64];
if ((!zs) || (NumRounds < 2))
return NULL;
prevNode = NULL;
for (i=0; i < NumRounds;i++)
{
tempNode = malloc(sizeof(Merkle));
if (i==0)
startNode = tempNode;
sha256(tempNode->data,(unsigned char *)&(zs[i]),sizeof(z) * 8);
tempNode->parent = NULL;
tempNode->type = 0;
tempNode->next = NULL;
tempNode->previous = prevNode;
if (prevNode)
prevNode->next = tempNode;
if (!odd)
{
tempNode->sibling = NULL;
odd = 1;
}
else
{
prevNode->sibling = tempNode;
tempNode->sibling = prevNode;
odd = 0;
}
prevNode = tempNode;
}
while (!done)
{
childNode = startNode;
while (childNode->parent)
childNode = childNode->parent;
if (!childNode->sibling)
{
done = 1;
continue;
}
odd = 0;
prevNode = NULL;
while (childNode != NULL)
{
tempNode = malloc(sizeof(Merkle));
tempNode->type = 1;
childNode->parent = tempNode;
tempNode->previous = prevNode;
if (prevNode)
prevNode->next = tempNode;
tempNode->next = NULL;
tempNode->parent = NULL;
if (!odd)
{
tempNode->sibling = NULL;
odd = 1;
}
else
{
prevNode->sibling = tempNode;
tempNode->sibling = prevNode;
odd = 0;
}
if (childNode->sibling)
{
childNode->sibling->parent = tempNode;
memcpy(datablock,childNode->data,32);
memcpy(&(datablock[32]),childNode->sibling->data,32);
sha256(tempNode->data,datablock,64*8);
childNode = childNode->sibling->next;
}
else
{
memset(datablock,0,sizeof(datablock));
memcpy(datablock,childNode->data,32);
sha256(tempNode->data,datablock,64*8);
childNode = childNode->sibling;
}
prevNode = tempNode;
}
}
return startNode;
}
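/* Editor's sketch (hypothetical verifier, not in the original): the prover
below serializes one 64-byte preimage block per tree level (left sibling
first, zero-padded when a node is unpaired), so a branch can be checked by
re-hashing each block and comparing the final digest to the root: */
static int verify_merkle_branch(unsigned char root[32], unsigned char *branch, int nlevels) {
unsigned char node[32];
if (nlevels <= 0)
return 0;
for (int lev = 0; lev < nlevels; lev++) {
// a full verifier would also check that `node` (from the previous
// level) appears inside this 64-byte block before hashing it
sha256(node, branch + 64*lev, 64*8);
}
return memcmp(node, root, 32) == 0;
}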
void DestroyMerkleTree(Merkle * startNode)
{
Merkle * tempNode;
if (startNode->parent)
DestroyMerkleTree(startNode->parent);
startNode->parent = NULL;
while (startNode)
{
tempNode = startNode->next;
free(startNode);
startNode = tempNode;
}
return;
}
#define NUM_LOOPS 1
int main(int argc, char * argv[]) {
setbuf(stdout, NULL);
srand((unsigned) time(NULL));
init_EVP();
openmp_thread_setup();
char CHALLENGE[BLOCK_SIZE];
char ek[BLOCK_SIZE]; //eval key: at most 447 bits (55 bytes), one padded SHA-256 block
//
if (argc != 4)
{
printf("Usage: %s <number of rounds (e.g. 20, 40, 60, 80, 100)> <challenge (Max %d char)> <eval key (Max %d char)>\n",argv[0],MSG_SIZE,MSG_SIZE);
return -1;
}
NUM_ROUNDS = atoi(argv[1]);
if ((NUM_ROUNDS & 0x01) || (NUM_ROUNDS < 4))
{
printf("Number of rounds should be even and > 4\n");
return -1;
}
unsigned char garbage[4];
if(RAND_bytes(garbage, 4) != 1) {
printf("RAND_bytes failed crypto, aborting\n");
return 0;
}
memset(CHALLENGE,0,sizeof(CHALLENGE));
strncpy(CHALLENGE,argv[2],MSG_SIZE); //55 is max length as we only support 447 bits = 55.875 bytes
memset(ek,0,sizeof(ek));
strncpy(ek,argv[3],MSG_SIZE);
int i = strlen(ek);
printf("ek length: %d\n", i);
printf("Iterations of ZKBdf: %d\n", NUM_ROUNDS);
unsigned char input[BLOCK_SIZE]; // 512 bits
memset(input,0,sizeof(input));
memcpy(input,ek,sizeof(input));
struct timeval begin, delta;
gettimeofday(&begin,NULL);
unsigned char rs[NUM_ROUNDS][3][4];
unsigned char keys[NUM_ROUNDS][3][16];
a as[NUM_ROUNDS];
View localViews[NUM_ROUNDS][3];
int totalCrypto = 0;
z* zs;
for(int loops=0;loops<NUM_LOOPS;loops++)
{
//Generating keys
if(RAND_bytes((unsigned char *) keys, NUM_ROUNDS*3*16) != 1) {
printf("RAND_bytes failed crypto, aborting\n");
return 0;
}
if(RAND_bytes((unsigned char *)rs, NUM_ROUNDS*3*4) != 1) {
printf("RAND_bytes failed crypto, aborting\n");
return 0;
}
//Sharing secrets
unsigned char shares[NUM_ROUNDS][3][BLOCK_SIZE];
memset(shares,0,NUM_ROUNDS*3*BLOCK_SIZE);
if(RAND_bytes((unsigned char *)shares, NUM_ROUNDS*3*BLOCK_SIZE) != 1) {
printf("RAND_bytes failed crypto, aborting\n");
return 0;
}
#pragma omp parallel for
for(int k=0; k<NUM_ROUNDS; k++) {
for (int j = 0; j < i; j++) {
shares[k][2][j] = input[j] ^ shares[k][0][j] ^ shares[k][1][j];
}
for (int j = i; j < BLOCK_SIZE; j++) {
shares[k][2][j] = shares[k][0][j] = shares[k][1][j] = 0;
}
}
unsigned char *randomness[NUM_ROUNDS][3];
int es[NUM_ROUNDS];
uint32_t finalHash1[8];
uint32_t finalHash2[8];
zs = malloc(sizeof(z)*NUM_ROUNDS);
int r;
for (r=0;r<NUM_ROUNDS;r++)
{
unsigned char plaintext[16];
memset(plaintext,0x30,sizeof(plaintext));
if (r!=0)
{
SHA256_CTX ctx;
unsigned char prevroundhash[SHA256_DIGEST_LENGTH];
SHA256_Init(&ctx);
SHA256_Update(&ctx, &(zs[r-1]), sizeof(z));
SHA256_Final(prevroundhash, &ctx);
memcpy(plaintext,prevroundhash,sizeof(plaintext));
}
//Generating randomness
// #pragma omp parallel for
// for(int k=0; k<(NUM_ROUNDS); k++) {
for(int j = 0; j<3; j++) {
randomness[r][j] = malloc((ySize*4)*sizeof(unsigned char));
getAllRandomness(keys[r][j], plaintext, randomness[r][j]);
}
// }
//Running MPC-SHA2
// #pragma omp parallel for
// for(int k=0; k<NUM_ROUNDS; k++) {
as[r] = commit(i, shares[r], CHALLENGE, randomness[r], rs[r], localViews[r]);
for(int j=0; j<3; j++) {
free(randomness[r][j]);
}
// }
//Committing
// #pragma omp parallel for
// for(int k=0; k<(NUM_ROUNDS); k++) {
unsigned char hash1[SHA256_DIGEST_LENGTH];
memset(hash1,0,sizeof(hash1));
H(keys[r][0], localViews[r][0], rs[r][0], hash1);
memcpy(as[r].h[0], &hash1, 32);
H(keys[r][1], localViews[r][1], rs[r][1], hash1);
memcpy(as[r].h[1], &hash1, 32);
H(keys[r][2], localViews[r][2], rs[r][2], hash1);
memcpy(as[r].h[2], &hash1, 32);
// }
//Generating E
if (r==0)
{
for (int j = 0; j < 8; j++) {
finalHash1[j] = as[0].yp1[0][j]^as[0].yp1[1][j]^as[0].yp1[2][j];
finalHash2[j] = as[0].yp2[0][j]^as[0].yp2[1][j]^as[0].yp2[2][j];
}
printf("output H(ek) = ");
for (int i = 0; i< 8;i++)
{
printf("%02X",finalHash1[i]);
}
printf("\n");
printf("output HMAC(ek,Challenge) = ");
for (int i = 0; i< 8;i++)
{
printf("%02X",finalHash2[i]);
}
printf("\n");
}
H3(finalHash1, finalHash2, &(as[r]), /*NUM_ROUNDS*/ 1, &(es[r]));
//Packing Z
// #pragma omp parallel for
// for(int i = 0; i<(NUM_ROUNDS); i++) {
zs[r] = prove(es[r],keys[r],rs[r], localViews[r]);
// }
}
}
// now to extract the PCP proofs
int PCProunds = (int) ceil(log(NUM_ROUNDS)/log(2));
int Totalselected = 0;
unsigned char PCPselected[NUM_ROUNDS];
Merkle * startNode = NULL;
Merkle * currNode = NULL;
Merkle * tempNode = NULL;
Merkle * rootNode = NULL;
unsigned char MerkleHash[64];
unsigned char MerkleBranch[(32*2*PCProunds)+32];
int MerkleHashPtr;
int Nextselected;
startNode = BuildMerkleTree(NUM_ROUNDS,zs);
rootNode = startNode;
while (rootNode->parent)
rootNode = rootNode->parent;
memset(MerkleHash,0,sizeof(MerkleHash));
memcpy(&(MerkleHash[32]),rootNode->data,32);
sha256(MerkleHash,MerkleHash,64*8);
MerkleHashPtr = 0;
memset(PCPselected,0,sizeof(PCPselected));
while (Totalselected < PCProunds)
{
Nextselected = GetNextSelected(NUM_ROUNDS,MerkleHash,&MerkleHashPtr);
if (!PCPselected[Nextselected])
{
PCPselected[Nextselected] = 1;
Totalselected++;
}
if (MerkleHashPtr >= 32)
{
sha256(MerkleHash,MerkleHash,64*8);
MerkleHashPtr = 0;
}
}
gettimeofday(&delta,NULL);
unsigned long inMilli = (delta.tv_sec - begin.tv_sec)*1000000 + (delta.tv_usec - begin.tv_usec);
inMilli /= 1000;
//Writing ZKBoo proofs to file
FILE *file;
char outputFile[3*sizeof(int) + 8];
sprintf(outputFile, "out%i.bin", NUM_ROUNDS);
file = fopen(outputFile, "wb");
if (!file) {
printf("Unable to open file!");
return 1;
}
fwrite(as, sizeof(a), NUM_ROUNDS, file);
fwrite(zs, sizeof(z), NUM_ROUNDS, file);
fclose(file);
// writing PCP proofs to file
sprintf(outputFile, "pcp%i-%i.bin", NUM_ROUNDS,PCProunds);
file = fopen(outputFile, "wb");
if (!file) {
printf("Unable to open file!");
return 1;
}
currNode = startNode;
fwrite(rootNode->data,32,1,file); // write the root node first
tempNode = startNode;
for (int k =0;k<NUM_ROUNDS;k++)
{
fwrite(tempNode->data,32,1,file);
tempNode = tempNode->next;
}
for (int j = 0; j < NUM_ROUNDS; j++)
{
if (PCPselected[j])
{
// print current node
tempNode = currNode;
memset(MerkleBranch,0,sizeof(MerkleBranch));
MerkleHashPtr = 0;
while(tempNode->parent != NULL) // write the current node
{
if (tempNode->sibling)
{
if (tempNode->sibling == tempNode->next)
{
memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->data,32);
MerkleHashPtr += 32;
memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->sibling->data,32);
MerkleHashPtr += 32;
}
else
{
memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->sibling->data,32);
MerkleHashPtr += 32;
memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->data,32);
MerkleHashPtr += 32;
}
}
else
{
memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->data,32);
MerkleHashPtr += 64;
}
tempNode = tempNode->parent;
}
fwrite(MerkleBranch,MerkleHashPtr,1,file);
fwrite(&(as[j]), sizeof(a), 1, file);
fwrite(&(zs[j]), sizeof(z), 1, file);
fwrite(&(as[j-1]), sizeof(a), 1, file);
fwrite(&(zs[j-1]), sizeof(z), 1, file);
}
currNode = currNode->next;
}
DestroyMerkleTree(startNode);
fclose(file);
free(zs);
printf("Total time taken for %d loops: %d mili-seconds\n",NUM_LOOPS,inMilli);
printf("Time per loop: %d mili-seconds\n",inMilli/NUM_LOOPS);
printf("\n");
printf("zkboo Proof output to file %s", outputFile);
openmp_thread_cleanup();
cleanup_EVP();
return EXIT_SUCCESS;
}
|
GB_binop__isne_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isne_int16
// A.*B function (eWiseMult): GB_AemultB__isne_int16
// A*D function (colscale): GB_AxD__isne_int16
// D*A function (rowscale): GB_DxB__isne_int16
// C+=B function (dense accum): GB_Cdense_accumB__isne_int16
// C+=b function (dense accum): GB_Cdense_accumb__isne_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_int16
// C=scalar+B GB_bind1st__isne_int16
// C=scalar+B' GB_bind1st_tran__isne_int16
// C=A+scalar GB_bind2nd__isne_int16
// C=A'+scalar GB_bind2nd_tran__isne_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT16 || GxB_NO_ISNE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__isne_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__isne_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__isne_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__isne_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__isne_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__isne_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__isne_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__isne_int16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t bij = Bx [p] ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__isne_int16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB_bind1st_tran__isne_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB_bind2nd_tran__isne_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
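// Editor's note: when GB_DISABLE is true (the ISNE op or the int16 type was
// excluded via GB_control.h / GxB_NO_* compile flags), every kernel in this
// file returns GrB_NO_VALUE. The GraphBLAS caller treats that as "no factory
// kernel available" and falls back to the slower generic, typecasting
// implementation, so disabling trades speed for binary size, never
// correctness.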
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
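/* Editor's note: typical use of timeval_subtract, as in the TEST loop below:
struct timeval t0, t1, dt;
gettimeofday(&t0, 0);
... work ...
gettimeofday(&t1, 0);
timeval_subtract(&dt, &t1, &t0);                 // dt = t1 - t0
double secs = dt.tv_sec + dt.tv_usec * 1.0e-6;   // elapsed seconds
*/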
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1; // Nx, Ny, Nz, Nt would otherwise be read uninitialized below
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size list, terminated by a -1 sentinel element
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// stencil body - additions: 6, multiplications: 2 per point
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) {
for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(8*t1+Nx+13,32)),floord(16*t2+Nx+12,32)),floord(8*t3+Nx+4,32)),floord(16*t1-16*t2+Nz+Nx+11,32));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),32*t4+30),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
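// Editor's sketch: the tiled CLooG nest above is a time-skewed, blocked
// version of this naive order-1 7-point Jacobi sweep (reference only,
// compiled out):
#if 0
for (t = 0; t < Nt - 1; t++)
for (i = 1; i < Nz - 1; i++)
for (j = 1; j < Ny - 1; j++)
for (k = 1; k < Nx - 1; k++)
A[(t+1)%2][i][j][k] = alpha * A[t%2][i][j][k]
+ beta * (A[t%2][i-1][j][k] + A[t%2][i+1][j][k]
+ A[t%2][i][j-1][k] + A[t%2][i][j+1][k]
+ A[t%2][i][j][k-1] + A[t%2][i][j][k+1]);
#endif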
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing here distorts the timing measurements)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
mpush3.c | /* C Library for Skeleton 3D Electrostatic OpenMP PIC Code */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <math.h>
#include "mpush3.h"
/*--------------------------------------------------------------------*/
double ranorm() {
/* this program calculates a random number y from a gaussian distribution
with zero mean and unit variance, according to the Box-Muller method:
y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1))
y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)),
where x is a random number uniformly distributed on (0,1).
written for the ibm by viktor k. decyk, ucla
local data */
static int r1 = 885098780, r2 = 1824280461;
static int r4 = 1396483093, r5 = 55318673;
static int iflg = 0;
static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0;
static double r0 = 0.0;
int isc, i1;
double ranorm, r3, asc, bsc, temp;
if (iflg==1) {
ranorm = r0;
r0 = 0.0;
iflg = 0;
return ranorm;
}
isc = 65536;
asc = (double) isc;
bsc = asc*asc;
i1 = r1 - (r1/isc)*isc;
r3 = h1l*(double) r1 + asc*h1u*(double) i1;
i1 = r3/bsc;
r3 -= ((double) i1)*bsc;
bsc = 0.5*bsc;
i1 = r2/isc;
isc = r2 - i1*isc;
r0 = h1l*(double) r2 + asc*h1u*(double) isc;
asc = 1.0/bsc;
isc = r0*asc;
r2 = r0 - ((double) isc)*bsc;
r3 += (double) isc + 2.0*h1u*(double) i1;
isc = r3*asc;
r1 = r3 - ((double) isc)*bsc;
temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc));
isc = 65536;
asc = (double) isc;
bsc = asc*asc;
i1 = r4 - (r4/isc)*isc;
r3 = h2l*(double) r4 + asc*h1u*(double) i1;
i1 = r3/bsc;
r3 -= ((double) i1)*bsc;
bsc = 0.5*bsc;
i1 = r5/isc;
isc = r5 - i1*isc;
r0 = h2l*(double) r5 + asc*h1u*(double) isc;
asc = 1.0/bsc;
isc = r0*asc;
r5 = r0 - ((double) isc)*bsc;
r3 += (double) isc + 2.0*h1u*(double) i1;
isc = r3*asc;
r4 = r3 - ((double) isc)*bsc;
r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc);
ranorm = temp*sin(r0);
r0 = temp*cos(r0);
iflg = 1;
return ranorm;
}
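/* Editor's note: ranorm caches the second Box-Muller deviate in static
state, so calls alternate between a fresh computation and the cached value.
A quick sanity check of the first two moments (mean ~ 0, variance ~ 1):
double s = 0.0, s2 = 0.0; int n = 1000000;
for (int j = 0; j < n; j++) { double v = ranorm(); s += v; s2 += v*v; }
printf("mean=%g var=%g\n", s/n, s2/n - (s/n)*(s/n));
*/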
/*--------------------------------------------------------------------*/
void cdistr3(float part[], float vtx, float vty, float vtz, float vdx,
float vdy, float vdz, int npx, int npy, int npz, int idimp,
int nop, int nx, int ny, int nz, int ipbc) {
/* for 3d code, this subroutine calculates initial particle co-ordinates
and velocities with uniform density and maxwellian velocity with drift
part[n][0] = position x of particle n
part[n][1] = position y of particle n
part[n][2] = position z of particle n
part[n][3] = velocity vx of particle n
part[n][4] = velocity vy of particle n
part[n][5] = velocity vz of particle n
vtx/vty/vtz = thermal velocity of electrons in x/y/z direction
vdx/vdy/vdz = drift velocity of beam electrons in x/y/z direction
npx/npy/npz = initial number of particles distributed in x/y/z
direction
idimp = size of phase space = 6
nop = number of particles
nx/ny/nz = system length in x/y/z direction
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
ranorm = gaussian random number with zero mean and unit variance
local data */
int j, k, l, k1, l1, npxy, npxyz;
float edgelx, edgely, edgelz, at1, at2, at3, at4, at5;
float sum1, sum2, sum3;
double dsum1, dsum2, dsum3;
npxy = npx*npy;
npxyz = npxy*npz;
/* set boundary values */
edgelx = 0.0;
edgely = 0.0;
edgelz = 0.0;
at1 = (float) nx/(float) npx;
at2 = (float) ny/(float) npy;
at3 = (float) nz/(float) npz;
if (ipbc==2) {
edgelx = 1.0;
edgely = 1.0;
edgelz = 1.0;
at1 = (float) (nx-2)/(float) npx;
at2 = (float) (ny-2)/(float) npy;
at3 = (float) (nz-2)/(float) npz;
}
else if (ipbc==3) {
edgelx = 1.0;
edgely = 1.0;
edgelz = 0.0;
at1 = (float) (nx-2)/(float) npx;
at2 = (float) (ny-2)/(float) npy;
}
/* uniform density profile */
for (l = 0; l < npz; l++) {
l1 = idimp*npxy*l;
at5 = edgelz + at3*(((float) l) + 0.5);
for (k = 0; k < npy; k++) {
k1 = idimp*npx*k + l1;
at4 = edgely + at2*(((float) k) + 0.5);
for (j = 0; j < npx; j++) {
part[idimp*j+k1] = edgelx + at1*(((float) j) + 0.5);
part[1+idimp*j+k1] = at4;
part[2+idimp*j+k1] = at5;
}
}
}
/* maxwellian velocity distribution */
for (j = 0; j < npxyz; j++) {
part[3+idimp*j] = vtx*ranorm();
part[4+idimp*j] = vty*ranorm();
part[5+idimp*j] = vtz*ranorm();
}
/* add correct drift */
dsum1 = 0.0;
dsum2 = 0.0;
dsum3 = 0.0;
for (j = 0; j < npxyz; j++) {
dsum1 += part[3+idimp*j];
dsum2 += part[4+idimp*j];
dsum3 += part[5+idimp*j];
}
sum1 = dsum1;
sum2 = dsum2;
sum3 = dsum3;
at1 = 1.0/(float) npxyz;
sum1 = at1*sum1 - vdx;
sum2 = at1*sum2 - vdy;
sum3 = at1*sum3 - vdz;
for (j = 0; j < npxyz; j++) {
part[3+idimp*j] -= sum1;
part[4+idimp*j] -= sum2;
part[5+idimp*j] -= sum3;
}
return;
}
/*--------------------------------------------------------------------*/
void cdblkp3l(float part[], int kpic[], int *nppmx, int idimp, int nop,
int mx, int my, int mz, int mx1, int my1, int mxyz1,
int *irc) {
/* this subroutine finds the maximum number of particles in each tile of
mx, my, mz to calculate size of segmented particle array ppart
linear interpolation
input: all except kpic, nppmx, output: kpic, nppmx
part = input particle array
part[n][0] = position x of particle n
part[n][1] = position y of particle n
part[n][2] = position z of particle n
kpic = output number of particles per tile
nppmx = return maximum number of particles in tile
idimp = size of phase space = 6
nop = number of particles
mx/my/mz = number of grids in sorting cell in x, y and z
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int j, k, n, m, l, mxy1, isum, ist, npx, ierr;
ierr = 0;
mxy1 = mx1*my1;
/* clear counter array */
for (k = 0; k < mxyz1; k++) {
kpic[k] = 0;
}
/* find how many particles in each tile */
for (j = 0; j < nop; j++) {
n = part[idimp*j];
n = n/mx;
m = part[1+idimp*j];
m = m/my;
l = part[2+idimp*j];
l = l/mz;
m = n + mx1*m + mxy1*l;
if (m < mxyz1) {
kpic[m] += 1;
}
else {
ierr = ierr > (m - mxyz1 + 1) ? ierr : (m - mxyz1 + 1);
}
}
/* find maximum */
isum = 0;
npx = 0;
for (k = 0; k < mxyz1; k++) {
ist = kpic[k];
npx = npx > ist ? npx : ist;
isum += ist;
}
*nppmx = npx;
/* check for errors */
if (ierr > 0) {
*irc = ierr;
}
else if (isum != nop) {
*irc = -1;
}
return;
}
/*--------------------------------------------------------------------*/
void cppmovin3l(float part[], float ppart[], int kpic[], int nppmx,
int idimp, int nop, int mx, int my, int mz, int mx1,
int my1, int mxyz1, int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
and copies to segmented array ppart
linear interpolation
input: all except ppart, kpic, output: ppart, kpic
part/ppart = input/output particle arrays
part[n][0] = position x of particle n
part[n][1] = position y of particle n
part[n][2] = position z of particle n
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = position z of particle n in tile m
ppart[m][n][3] = velocity vx of particle n in tile m
ppart[m][n][4] = velocity vy of particle n in tile m
ppart[m][n][5] = velocity vz of particle n in tile m
kpic = output number of particles per tile
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nop = number of particles
mx/my/mz = number of grids in sorting cell in x, y and z
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int i, j, k, n, m, l, mxy1, ip, ierr;
ierr = 0;
mxy1 = mx1*my1;
/* clear counter array */
for (k = 0; k < mxyz1; k++) {
kpic[k] = 0;
}
/* find addresses of particles at each tile and reorder particles */
for (j = 0; j < nop; j++) {
n = part[idimp*j];
n = n/mx;
m = part[1+idimp*j];
m = m/my;
l = part[2+idimp*j];
l = l/mz;
m = n + mx1*m + mxy1*l;
ip = kpic[m];
if (ip < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[i+idimp*(ip+nppmx*m)] = part[i+idimp*j];
}
}
else {
ierr = ierr > ip-nppmx+1 ? ierr : ip-nppmx+1;
}
kpic[m] = ip + 1;
}
if (ierr > 0)
*irc = ierr;
return;
}
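/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the typical
   call sequence for building the segmented array.  cdblkp3l sizes the
   tiles, the caller allocates ppart from the returned nppmx, and
   cppmovin3l performs the copy.  example_build_ppart is hypothetical
   and assumes <stdlib.h> is available for malloc. */
static float *example_build_ppart(float part[], int kpic[], int idimp,
                                  int nop, int mx, int my, int mz,
                                  int mx1, int my1, int mxyz1,
                                  int *nppmx, int *irc) {
   float *ppart;
   cdblkp3l(part,kpic,nppmx,idimp,nop,mx,my,mz,mx1,my1,mxyz1,irc);
   if (*irc != 0)
      return NULL;
/* nppmx slots per tile, idimp floats per slot; in the full codes      */
/* nppmx is usually padded with a safety margin before this allocation */
   ppart = (float *) malloc(idimp*(*nppmx)*mxyz1*sizeof(float));
   if (ppart != NULL)
      cppmovin3l(part,ppart,kpic,*nppmx,idimp,nop,mx,my,mz,mx1,my1,
                 mxyz1,irc);
   return ppart;
}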
/*--------------------------------------------------------------------*/
void cppcheck3l(float ppart[], int kpic[], int idimp, int nppmx, int nx,
int ny, int nz, int mx, int my, int mz, int mx1,
int my1, int mz1, int *irc) {
/* this subroutine performs a sanity check to make sure particles sorted
by x,y,z grid in tiles of mx, my, mz, are all within bounds.
tiles are assumed to be arranged in 3D linear memory
input: all except irc
output: irc
ppart[l][n][0] = position x of particle n in tile l
ppart[l][n][1] = position y of particle n in tile l
ppart[l][n][2] = position z of particle n in tile l
kpic[l] = number of reordered output particles in tile l
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mz1 = (system length in z direction - 1)/mz + 1
irc = particle error, returned only if error occurs, when irc > 0
local data */
int mxy1, mxyz1, noff, moff, loff, npp, j, k, l, nn, mm, ll, ist;
float edgelx, edgely, edgelz, edgerx, edgery, edgerz, dx, dy, dz;
mxy1 = mx1*my1;
mxyz1 = mxy1*mz1;
/* loop over tiles */
#pragma omp parallel for \
private(j,k,l,noff,moff,loff,npp,nn,mm,ll,ist,edgelx,edgely,edgelz, \
edgerx,edgery,edgerz,dx,dy,dz)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[l];
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
dx = ppart[idimp*(j+nppmx*l)];
dy = ppart[1+idimp*(j+nppmx*l)];
dz = ppart[2+idimp*(j+nppmx*l)];
/* find particles going out of bounds */
ist = 0;
if (dx < edgelx)
ist = 1;
if (dx >= edgerx)
ist = 2;
if (dy < edgely)
ist += 3;
if (dy >= edgery)
ist += 6;
if (dz < edgelz)
ist += 9;
if (dz >= edgerz)
ist += 18;
if (ist > 0)
*irc = l + 1;
}
}
return;
}
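/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: decodes the
   bounds code built in cppcheck3l (x gives 1 or 2, y adds 3 or 6,
   z adds 9 or 18) into per-axis flags, where 0 = inside,
   1 = below lower edge, 2 = at or above upper edge.
   example_decode_ist is hypothetical. */
static void example_decode_ist(int ist, int *xdir, int *ydir,
                               int *zdir) {
   *zdir = ist/9;
   ist -= 9*(*zdir);
   *ydir = ist/3;
   *xdir = ist - 3*(*ydir);
}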
/*--------------------------------------------------------------------*/
void cgppush3l(float ppart[], float fxyz[], int kpic[], float qbm,
float dt, float *ek, int idimp, int nppmx, int nx,
int ny, int nz, int mx, int my, int mz, int nxv, int nyv,
int nzv, int mx1, int my1, int mxyz1, int ipbc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with various boundary conditions.
OpenMP version using guard cells
data read in tiles
particles stored in segmented array
94 flops/particle, 30 loads, 6 stores
input: all, output: ppart, ek
equations used are:
vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t),z(t))*dt,
vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t),z(t))*dt,
vz(t+dt/2) = vz(t-dt/2) + (q/m)*fz(x(t),y(t),z(t))*dt,
where q/m is charge/mass, and
x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt,
z(t+dt) = z(t) + vz(t+dt/2)*dt
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
fy(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fy(n,m,l)+dx*fy(n+1,m,l))
+ dy*((1-dx)*fy(n,m+1,l) + dx*fy(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fy(n,m,l+1)+dx*fy(n+1,m,l+1))
+ dy*((1-dx)*fy(n,m+1,l+1) + dx*fy(n+1,m+1,l+1)))
fz(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fz(n,m,l)+dx*fz(n+1,m,l))
+ dy*((1-dx)*fz(n,m+1,l) + dx*fz(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fz(n,m,l+1)+dx*fz(n+1,m,l+1))
+ dy*((1-dx)*fz(n,m+1,l+1) + dx*fz(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = position z of particle n in tile m
ppart[m][n][3] = velocity vx of particle n in tile m
ppart[m][n][4] = velocity vy of particle n in tile m
ppart[m][n][5] = velocity vz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
kinetic energy/mass at time t is also calculated, using
ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2+
(vz(t+dt/2)+vz(t-dt/2))**2)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field array, must be >= nx+1
nyv = third dimension of field array, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp;
int i, j, k, l, nn, mm, ll, mxv, myv, mxyv, nxyv;
float qtm, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float x, y, z, dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz;
float vx, vy, vz;
float sfxyz[3*MXV*MYV*MZV];
/* float sfxyz[3*(mx+1)*(my+1)*(mz+1)]; */
double sum1, sum2;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
mxy1 = mx1*my1;
qtm = qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,x,y,z,dxp,dyp,dzp, \
amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,sum1,sfxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[l];
npoff = nppmx*l;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ll = (mz < nz-loff ? mz : nz-loff) + 1;
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxyz[3*(i+mxv*j+mxyv*k)]
= fxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+3*(i+mxv*j+mxyv*k)]
= fxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+3*(i+mxv*j+mxyv*k)]
= fxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
sum1 = 0.0;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
z = ppart[2+idimp*(j+npoff)];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find acceleration */
dx = amx*sfxyz[nn] + amy*sfxyz[nn+3];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3];
mm = nn + 3*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]);
nn += 3*mxyv;
vx = amx*sfxyz[nn] + amy*sfxyz[nn+3];
vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3];
vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3];
mm = nn + 3*mxv;
dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]);
dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]);
dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]);
/* new velocity */
vx = ppart[3+idimp*(j+npoff)];
vy = ppart[4+idimp*(j+npoff)];
vz = ppart[5+idimp*(j+npoff)];
dx = vx + qtm*dx;
dy = vy + qtm*dy;
dz = vz + qtm*dz;
/* average kinetic energy */
vx += dx;
vy += dy;
vz += dz;
sum1 += vx*vx + vy*vy + vz*vz;
ppart[3+idimp*(j+npoff)] = dx;
ppart[4+idimp*(j+npoff)] = dy;
ppart[5+idimp*(j+npoff)] = dz;
/* new position */
dx = x + dx*dt;
dy = y + dy*dt;
dz = z + dz*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[4+idimp*(j+npoff)] = -ppart[4+idimp*(j+npoff)];
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
ppart[5+idimp*(j+npoff)] = -ppart[5+idimp*(j+npoff)];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[4+idimp*(j+npoff)] = -ppart[4+idimp*(j+npoff)];
}
}
/* set new position */
ppart[idimp*(j+npoff)] = dx;
ppart[1+idimp*(j+npoff)] = dy;
ppart[2+idimp*(j+npoff)] = dz;
}
sum2 += sum1;
}
/* normalize kinetic energy */
*ek += 0.125f*sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
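/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the eight
   trilinear weights implied by the amx/amy/amz/dyp/dx1 algebra in
   cgppush3l, written out for one scalar grid quantity g stored with
   strides 1, mxv, mxyv (as in the local accumulator of cgppost3l).
   example_trilinear is hypothetical. */
static float example_trilinear(const float g[], int nn, int mxv,
                               int mxyv, float dx, float dy, float dz) {
   return (1.0f-dz)*((1.0f-dy)*((1.0f-dx)*g[nn] + dx*g[nn+1])
                   + dy*((1.0f-dx)*g[nn+mxv] + dx*g[nn+1+mxv]))
        + dz*((1.0f-dy)*((1.0f-dx)*g[nn+mxyv] + dx*g[nn+1+mxyv])
                   + dy*((1.0f-dx)*g[nn+mxv+mxyv]
                         + dx*g[nn+1+mxv+mxyv]));
}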
/*--------------------------------------------------------------------*/
void cgppushf3l(float ppart[], float fxyz[], int kpic[], int ncl[],
int ihole[], float qbm, float dt, float *ek, int idimp,
int nppmx, int nx, int ny, int nz, int mx, int my,
int mz, int nxv, int nyv, int nzv, int mx1, int my1,
int mxyz1, int ntmax, int *irc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with periodic boundary conditions.
also determines list of particles which are leaving this tile
OpenMP version using guard cells
data read in tiles
particles stored in segmented array
94 flops/particle, 30 loads, 6 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
equations used are:
vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t),z(t))*dt,
vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t),z(t))*dt,
vz(t+dt/2) = vz(t-dt/2) + (q/m)*fz(x(t),y(t),z(t))*dt,
where q/m is charge/mass, and
x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt,
z(t+dt) = z(t) + vz(t+dt/2)*dt
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
fy(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fy(n,m,l)+dx*fy(n+1,m,l))
+ dy*((1-dx)*fy(n,m+1,l) + dx*fy(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fy(n,m,l+1)+dx*fy(n+1,m,l+1))
+ dy*((1-dx)*fy(n,m+1,l+1) + dx*fy(n+1,m+1,l+1)))
fz(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fz(n,m,l)+dx*fz(n+1,m,l))
+ dy*((1-dx)*fz(n,m+1,l) + dx*fz(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fz(n,m,l+1)+dx*fz(n+1,m,l+1))
+ dy*((1-dx)*fz(n,m+1,l+1) + dx*fz(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = position z of particle n in tile m
ppart[m][n][3] = velocity vx of particle n in tile m
ppart[m][n][4] = velocity vy of particle n in tile m
ppart[m][n][5] = velocity vz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive calculations
kinetic energy/mass at time t is also calculated, using
ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2+
(vz(t+dt/2)+vz(t-dt/2))**2)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field array, must be >= nx+1
nyv = third dimension of field array, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp;
int i, j, k, l, ih, nh, nn, mm, ll, mxv, myv, mxyv, nxyv;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float qtm, x, y, z, dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz;
float vx, vy, vz;
float sfxyz[3*MXV*MYV*MZV];
/* float sfxyz[3*(mx+1)*(my+1)*(mz+1)]; */
double sum1, sum2;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
mxy1 = mx1*my1;
qtm = qbm*dt;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,ih,nh,x,y,z,dxp,dyp, \
dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,edgelx,edgely,edgelz,edgerx, \
edgery,edgerz,sum1,sfxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[l];
npoff = nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
ih = 0;
nh = 0;
nn += 1;
mm += 1;
ll += 1;
/* load local fields from global array */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxyz[3*(i+mxv*j+mxyv*k)]
= fxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+3*(i+mxv*j+mxyv*k)]
= fxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+3*(i+mxv*j+mxyv*k)]
= fxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* clear counters */
for (j = 0; j < 26; j++) {
ncl[j+26*l] = 0;
}
sum1 = 0.0;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
z = ppart[2+idimp*(j+npoff)];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find acceleration */
dx = amx*sfxyz[nn] + amy*sfxyz[nn+3];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3];
mm = nn + 3*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]);
nn += 3*mxyv;
vx = amx*sfxyz[nn] + amy*sfxyz[nn+3];
vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3];
vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3];
mm = nn + 3*mxv;
dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]);
dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]);
dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]);
/* new velocity */
vx = ppart[3+idimp*(j+npoff)];
vy = ppart[4+idimp*(j+npoff)];
vz = ppart[5+idimp*(j+npoff)];
dx = vx + qtm*dx;
dy = vy + qtm*dy;
dz = vz + qtm*dz;
/* average kinetic energy */
vx += dx;
vy += dy;
vz += dz;
sum1 += vx*vx + vy*vy + vz*vz;
ppart[3+idimp*(j+npoff)] = dx;
ppart[4+idimp*(j+npoff)] = dy;
ppart[5+idimp*(j+npoff)] = dz;
/* new position */
dx = x + dx*dt;
dy = y + dy*dt;
dz = z + dz*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx = dx - anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy = dy - any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
dz = dz - anz;
mm += 18;
}
else if (dz < edgelz) {
if (dz < 0.0f) {
dz += anz;
if (dz < anz)
mm += 9;
else
dz = 0.0f;
}
else {
mm += 9;
}
}
/* set new position */
ppart[idimp*(j+npoff)] = dx;
ppart[1+idimp*(j+npoff)] = dy;
ppart[2+idimp*(j+npoff)] = dz;
/* increment counters */
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
sum2 += sum1;
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
/* normalize kinetic energy */
*ek += 0.125f*sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
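/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the leap-frog
   update performed per particle in cgppush3l/cgppushf3l, isolated for
   one component.  example_leapfrog is hypothetical. */
static void example_leapfrog(float *x, float *v, float fx, float qbm,
                             float dt) {
/* v(t+dt/2) = v(t-dt/2) + (q/m)*fx(x(t))*dt */
   *v += qbm*dt*fx;
/* x(t+dt) = x(t) + v(t+dt/2)*dt */
   *x += (*v)*dt;
}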
/*--------------------------------------------------------------------*/
void cgppost3l(float ppart[], float q[], int kpic[], float qm,
int nppmx, int idimp, int mx, int my, int mz, int nxv,
int nyv, int nzv, int mx1, int my1, int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
OpenMP version using guard cells
data deposited in tiles
particles stored in segmented array
33 flops/particle, 11 loads, 8 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
q(n+1,m+1,l+1)=qm*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = position z of particle n in tile m
q[l][k][j] = charge density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
nzv = third dimension of charge array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp;
int i, j, k, l, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
float x, y, z, dxp, dyp, dzp, amx, amy, amz, dx1;
float sq[MXV*MYV*MZV];
/* float sq[(mx+1)*(my+1)*(mz+1)]; */
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
mxy1 = mx1*my1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
#pragma omp parallel for \
private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,lm,x,y,z,dxp,dyp, \
dzp,amx,amy,amz,dx1,sq)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[l];
npoff = nppmx*l;
/* zero out local accumulator */
for (j = 0; j < mxyv*(mz+1); j++) {
sq[j] = 0.0f;
}
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
z = ppart[2+idimp*(j+npoff)];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit charge within tile to local accumulator */
x = sq[nn] + amx*amz;
y = sq[nn+1] + amy*amz;
sq[nn] = x;
sq[nn+1] = y;
mm = nn + mxv;
x = sq[mm] + dyp*amz;
y = sq[mm+1] + dx1*amz;
sq[mm] = x;
sq[mm+1] = y;
nn += mxyv;
x = sq[nn] + amx*dzp;
y = sq[nn+1] + amy*dzp;
sq[nn] = x;
sq[nn+1] = y;
mm = nn + mxv;
x = sq[mm] + dyp*dzp;
y = sq[mm+1] + dx1*dzp;
sq[mm] = x;
sq[mm+1] = y;
}
/* deposit charge to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[i+mxv*j+mxyv*k];
}
}
}
/* deposit charge to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
if (lm > mz) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[i+mxv*j+mxyv*(lm-1)];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
+= sq[i+mxv*(mm-1)+mxyv*k];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[nm-1+mxv*j+mxyv*k];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
+= sq[i+mxv*(mm-1)+mxyv*(lm-1)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[mxv*j+mxyv*(lm-1)];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[nm-1+mxv*j+mxyv*(lm-1)];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
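/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the eight
   charge-deposit increments of cgppost3l, written out directly from
   the q(n,m,l)... formulas in its header comment, for an accumulator
   with strides 1, mxv, mxyv like the local array sq.
   example_deposit is hypothetical. */
static void example_deposit(float q[], int nn, int mxv, int mxyv,
                            float qm, float dx, float dy, float dz) {
   q[nn]            += qm*(1.0f-dx)*(1.0f-dy)*(1.0f-dz);
   q[nn+1]          += qm*dx*(1.0f-dy)*(1.0f-dz);
   q[nn+mxv]        += qm*(1.0f-dx)*dy*(1.0f-dz);
   q[nn+1+mxv]      += qm*dx*dy*(1.0f-dz);
   q[nn+mxyv]       += qm*(1.0f-dx)*(1.0f-dy)*dz;
   q[nn+1+mxyv]     += qm*dx*(1.0f-dy)*dz;
   q[nn+mxv+mxyv]   += qm*(1.0f-dx)*dy*dz;
   q[nn+1+mxv+mxyv] += qm*dx*dy*dz;
}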
/*--------------------------------------------------------------------*/
void cpporder3l(float ppart[], float ppbuff[], int kpic[], int ncl[],
int ihole[], int idimp, int nppmx, int nx, int ny,
int nz, int mx, int my, int mz, int mx1, int my1,
int mz1, int npbmx, int ntmax, int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 3D linear memory
algorithm has 3 steps. first, one finds particles leaving tile and
stores their number in each direction, location, and destination in ncl
and ihole. second, a prefix scan of ncl is performed and departing
particles are buffered in ppbuff in direction order. finally, we copy
the incoming particles from other tiles into ppart.
input: all except ppbuff, ncl, ihole, irc
output: ppart, ppbuff, kpic, ncl, ihole, irc
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = position z of particle n in tile m
ppbuff[l][n][i] = i co-ordinate of particle n in tile l
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mz1 = (system length in z direction - 1)/mz + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, mxyz1, noff, moff, loff, npp, ncoff;
int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll, isum;
int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dx, dy, dz;
int ks[26];
mxy1 = mx1*my1;
mxyz1 = mxy1*mz1;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,l,noff,moff,loff,npp,nn,mm,ll,ih,nh,ist,dx,dy,dz,edgelx, \
edgely,edgelz,edgerx,edgery,edgerz)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[l];
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
ih = 0;
nh = 0;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
/* clear counters */
for (j = 0; j < 26; j++) {
ncl[j+26*l] = 0;
}
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
dx = ppart[idimp*(j+nppmx*l)];
dy = ppart[1+idimp*(j+nppmx*l)];
dz = ppart[2+idimp*(j+nppmx*l)];
/* find particles going out of bounds */
ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
ppart[idimp*(j+nppmx*l)] = dx - anx;
ist = 2;
}
else if (dx < edgelx) {
if (dx < 0.0) {
dx += anx;
if (dx < anx)
ist = 1;
else
dx = 0.0;
ppart[idimp*(j+nppmx*l)] = dx;
}
else {
ist = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
ppart[1+idimp*(j+nppmx*l)] = dy - any;
ist += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
ist += 3;
else
dy = 0.0;
ppart[1+idimp*(j+nppmx*l)] = dy;
}
else {
ist += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
ppart[2+idimp*(j+nppmx*l)] = dz - anz;
ist += 18;
}
else if (dz < edgelz) {
if (dz < 0.0) {
dz += anz;
if (dz < anz)
ist += 9;
else
dz = 0.0;
ppart[2+idimp*(j+nppmx*l)] = dz;
}
else {
ist += 9;
}
}
if (ist > 0) {
ncl[ist+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = ist;
}
else {
nh = 1;
}
}
}
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
/* ihole overflow */
if (*irc > 0)
return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,l,isum,ist,nh,ip,j1,ii)
for (l = 0; l < mxyz1; l++) {
/* find address offset for ordered ppbuff array */
isum = 0;
for (j = 0; j < 26; j++) {
ist = ncl[j+26*l];
ncl[j+26*l] = isum;
isum += ist;
}
nh = ihole[2*(ntmax+1)*l];
ip = 0;
/* loop over particles leaving tile */
for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1;
ist = ihole[1+2*(j+1+(ntmax+1)*l)];
ii = ncl[ist+26*l-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[i+idimp*(ii+npbmx*l)]
= ppart[i+idimp*(j1+nppmx*l)];
}
}
else {
ip = 1;
}
ncl[ist+26*l-1] = ii + 1;
}
/* set error */
if (ip > 0)
*irc = ncl[25+26*l];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,kx,ky,kz,kl,kr,kxl,kxr,lk,ll,lr,ih,nh,ncoff, \
ist,j1,j2,ip,ks)
for (l = 0; l < mxyz1; l++) {
npp = kpic[l];
kz = l/mxy1;
k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
lk = kz*mxy1;
/* find tile behind */
ll = kz - 1;
if (ll < 0)
ll += mz1;
ll = ll*mxy1;
/* find tile in front */
lr = kz + 1;
if (lr >= mz1)
lr -= mz1;
lr = lr*mxy1;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1 ;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
ks[0] = kxr + kk + lk;
ks[1] = kxl + kk + lk;
ks[2] = kx + kr + lk;
ks[3] = kxr + kr + lk;
ks[4] = kxl + kr + lk;
ks[5] = kx + kl + lk;
ks[6] = kxr + kl + lk;
ks[7] = kxl + kl + lk;
ks[8] = kx + kk + lr;
ks[9] = kxr + kk + lr;
ks[10] = kxl + kk + lr;
ks[11] = kx + kr + lr;
ks[12] = kxr + kr + lr;
ks[13] = kxl + kr + lr;
ks[14] = kx + kl + lr;
ks[15] = kxr + kl + lr;
ks[16] = kxl + kl + lr;
ks[17] = kx + kk + ll;
ks[18] = kxr + kk + ll;
ks[19] = kxl + kk + ll;
ks[20] = kx + kr + ll;
ks[21] = kxr + kr + ll;
ks[22] = kxl + kr + ll;
ks[23] = kx + kl + ll;
ks[24] = kxr + kl + ll;
ks[25] = kxl + kl + ll;
/* loop over directions */
nh = ihole[2*(ntmax+1)*l];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
for (ii = 0; ii < 26; ii++) {
if (ii > 0)
ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+26*ks[ii]] - ncoff;
for (j = 0; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp;
npp += 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[i+idimp*(j1+nppmx*l)]
= ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])];
}
}
else {
ist = 1;
}
}
}
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
if (ih < nh) {
ip = nh - ih;
for (j = 0; j < ip; j++) {
j1 = npp - j - 1;
j2 = ihole[2*(nh-j+(ntmax+1)*l)] - 1;
if (j1 > j2) {
/* move particle only if it is below current hole */
for (i = 0; i < idimp; i++) {
ppart[i+idimp*(j2+nppmx*l)]
= ppart[i+idimp*(j1+nppmx*l)];
}
}
}
npp -= ip;
}
kpic[l] = npp;
}
return;
}
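/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the exclusive
   prefix scan applied to ncl in cpporder3l and cpporderf3l, which
   converts the 26 per-direction counts of one tile into starting
   offsets into ppbuff.  example_prefix_scan is hypothetical. */
static void example_prefix_scan(int ncl[], int n) {
   int j, ist, isum = 0;
   for (j = 0; j < n; j++) {
      ist = ncl[j];    /* count for direction j */
      ncl[j] = isum;   /* replace with running offset */
      isum += ist;
   }
}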
/*--------------------------------------------------------------------*/
void cpporderf3l(float ppart[], float ppbuff[], int kpic[], int ncl[],
int ihole[], int idimp, int nppmx, int mx1, int my1,
int mz1, int npbmx, int ntmax, int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 3D linear memory
the algorithm has 2 steps. first, a prefix scan of ncl is performed
and departing particles are buffered in ppbuff in direction order.
then we copy the incoming particles from other tiles into ppart.
it assumes that the number, location, and destination of particles
leaving a tile have been previously stored in ncl and ihole by the
cgppushf3l procedure.
input: all except ppbuff, irc
output: ppart, ppbuff, kpic, ncl, irc
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = position z of particle n in tile m
ppbuff[l][n][i] = i co-ordinate of particle n in tile l
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mz1 = (system length in z direction - 1)/mz + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, mxyz1, npp, ncoff;
int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, ll, isum;
int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
int ks[26];
mxy1 = mx1*my1;
mxyz1 = mxy1*mz1;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,l,isum,ist,nh,ip,j1,ii)
for (l = 0; l < mxyz1; l++) {
/* find address offset for ordered ppbuff array */
isum = 0;
for (j = 0; j < 26; j++) {
ist = ncl[j+26*l];
ncl[j+26*l] = isum;
isum += ist;
}
nh = ihole[2*(ntmax+1)*l];
ip = 0;
/* loop over particles leaving tile */
for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1;
ist = ihole[1+2*(j+1+(ntmax+1)*l)];
ii = ncl[ist+26*l-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[i+idimp*(ii+npbmx*l)]
= ppart[i+idimp*(j1+nppmx*l)];
}
}
else {
ip = 1;
}
ncl[ist+26*l-1] = ii + 1;
}
/* set error */
if (ip > 0)
*irc = ncl[25+26*l];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,kx,ky,kz,kl,kr,kxl,kxr,lk,ll,lr,ih,nh,ncoff, \
ist,j1,j2,ip,ks)
for (l = 0; l < mxyz1; l++) {
npp = kpic[l];
kz = l/mxy1;
k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
lk = kz*mxy1;
/* find tile behind */
ll = kz - 1;
if (ll < 0)
ll += mz1;
ll = ll*mxy1;
/* find tile in front */
lr = kz + 1;
if (lr >= mz1)
lr -= mz1;
lr = lr*mxy1;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1 ;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
ks[0] = kxr + kk + lk;
ks[1] = kxl + kk + lk;
ks[2] = kx + kr + lk;
ks[3] = kxr + kr + lk;
ks[4] = kxl + kr + lk;
ks[5] = kx + kl + lk;
ks[6] = kxr + kl + lk;
ks[7] = kxl + kl + lk;
ks[8] = kx + kk + lr;
ks[9] = kxr + kk + lr;
ks[10] = kxl + kk + lr;
ks[11] = kx + kr + lr;
ks[12] = kxr + kr + lr;
ks[13] = kxl + kr + lr;
ks[14] = kx + kl + lr;
ks[15] = kxr + kl + lr;
ks[16] = kxl + kl + lr;
ks[17] = kx + kk + ll;
ks[18] = kxr + kk + ll;
ks[19] = kxl + kk + ll;
ks[20] = kx + kr + ll;
ks[21] = kxr + kr + ll;
ks[22] = kxl + kr + ll;
ks[23] = kx + kl + ll;
ks[24] = kxr + kl + ll;
ks[25] = kxl + kl + ll;
/* loop over directions */
nh = ihole[2*(ntmax+1)*l];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
for (ii = 0; ii < 26; ii++) {
if (ii > 0)
ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+26*ks[ii]] - ncoff;
for (j = 0; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp;
npp += 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[i+idimp*(j1+nppmx*l)]
= ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])];
}
}
else {
ist = 1;
}
}
}
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
if (ih < nh) {
ip = nh - ih;
for (j = 0; j < ip; j++) {
j1 = npp - j - 1;
j2 = ihole[2*(nh-j+(ntmax+1)*l)] - 1;
if (j1 > j2) {
/* move particle only if it is below current hole */
for (i = 0; i < idimp; i++) {
ppart[i+idimp*(j2+nppmx*l)]
= ppart[i+idimp*(j1+nppmx*l)];
}
}
}
npp -= ip;
}
kpic[l] = npp;
}
return;
}
/*--------------------------------------------------------------------*/
void ccguard3l(float fxyz[], int nx, int ny, int nz, int nxe, int nye,
int nze) {
/* replicate extended periodic vector field fxyz
linear interpolation
nx/ny/nz = system length in x/y/z direction
nxe = first dimension of field arrays, must be >= nx+1
nye = second dimension of field arrays, must be >= ny+1
nze = third dimension of field arrays, must be >= nz+1
local data */
int j, k, l, nxye3, ll;
nxye3 = 3*nxe*nye;
/* copy edges of extended field */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,ll)
for (l = 0; l < nz; l++) {
ll = nxye3*l;
for (k = 0; k < ny; k++) {
fxyz[3*(nx+nxe*k)+ll] = fxyz[3*nxe*k+ll];
fxyz[1+3*(nx+nxe*k)+ll] = fxyz[1+3*nxe*k+ll];
fxyz[2+3*(nx+nxe*k)+ll] = fxyz[2+3*nxe*k+ll];
}
for (j = 0; j < nx; j++) {
fxyz[3*(j+nxe*ny)+ll] = fxyz[3*j+ll];
fxyz[1+3*(j+nxe*ny)+ll] = fxyz[1+3*j+ll];
fxyz[2+3*(j+nxe*ny)+ll] = fxyz[2+3*j+ll];
}
fxyz[3*(nx+nxe*ny)+ll] = fxyz[ll];
fxyz[1+3*(nx+nxe*ny)+ll] = fxyz[1+ll];
fxyz[2+3*(nx+nxe*ny)+ll] = fxyz[2+ll];
}
#pragma omp for \
private(j,k)
for (k = 0; k < ny; k++) {
for (j = 0; j < nx; j++) {
fxyz[3*(j+nxe*k)+nxye3*nz] = fxyz[3*(j+nxe*k)];
fxyz[1+3*(j+nxe*k)+nxye3*nz] = fxyz[1+3*(j+nxe*k)];
fxyz[2+3*(j+nxe*k)+nxye3*nz] = fxyz[2+3*(j+nxe*k)];
}
fxyz[3*(nx+nxe*k)+nxye3*nz] = fxyz[3*nxe*k];
fxyz[1+3*(nx+nxe*k)+nxye3*nz] = fxyz[1+3*nxe*k];
fxyz[2+3*(nx+nxe*k)+nxye3*nz] = fxyz[2+3*nxe*k];
}
}
for (j = 0; j < nx; j++) {
fxyz[3*(j+nxe*ny)+nxye3*nz] = fxyz[3*j];
fxyz[1+3*(j+nxe*ny)+nxye3*nz] = fxyz[1+3*j];
fxyz[2+3*(j+nxe*ny)+nxye3*nz] = fxyz[2+3*j];
}
fxyz[3*(nx+nxe*ny)+nxye3*nz] = fxyz[0];
fxyz[1+3*(nx+nxe*ny)+nxye3*nz] = fxyz[1];
fxyz[2+3*(nx+nxe*ny)+nxye3*nz] = fxyz[2];
return;
}
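/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the guard-cell
   idea of ccguard3l reduced to one dimension: a periodic field of
   length nx stored in nx+1 slots repeats point 0 in the extra slot, so
   the linear interpolation in the push never has to wrap an index.
   example_cguard1 is hypothetical. */
static void example_cguard1(float f[], int nx) {
   f[nx] = f[0];
}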
/*--------------------------------------------------------------------*/
void caguard3l(float q[], int nx, int ny, int nz, int nxe, int nye,
int nze) {
/* accumulate extended periodic scalar field q
linear interpolation
nx/ny/nz = system length in x/y/z direction
nxe = first dimension of field arrays, must be >= nx+1
nye = second dimension of field arrays, must be >= ny+1
nze = third dimension of field arrays, must be >= nz+1
local data */
int j, k, l, nxye, ll;
nxye = nxe*nye;
/* accumulate edges of extended field */
#pragma omp parallel
{
#pragma omp for \
private(j,k,l,ll)
for (l = 0; l < nz; l++) {
ll = nxye*l;
for (k = 0; k < ny; k++) {
q[nxe*k+ll] += q[nx+nxe*k+ll];
q[nx+nxe*k+ll] = 0.0;
}
for (j = 0; j < nx; j++) {
q[j+ll] += q[j+nxe*ny+ll];
q[j+nxe*ny+ll] = 0.0;
}
q[ll] += q[nx+nxe*ny+ll];
q[nx+nxe*ny+ll] = 0.0;
}
#pragma omp for \
private(j,k)
for (k = 0; k < ny; k++) {
for (j = 0; j < nx; j++) {
q[j+nxe*k] += q[j+nxe*k+nxye*nz];
q[j+nxe*k+nxye*nz] = 0.0;
}
q[nxe*k] += q[nx+nxe*k+nxye*nz];
q[nx+nxe*k+nxye*nz] = 0.0;
}
}
for (j = 0; j < nx; j++) {
q[j] += q[j+nxe*ny+nxye*nz];
q[j+nxe*ny+nxye*nz] = 0.0;
}
q[0] += q[nx+nxe*ny+nxye*nz];
q[nx+nxe*ny+nxye*nz] = 0.0;
return;
}
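/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the companion
   of example_cguard1 for deposition, as in caguard3l: charge deposited
   into the guard slot belongs to grid point 0 under periodic
   boundaries, so it is folded back and the guard is cleared.
   example_aguard1 is hypothetical. */
static void example_aguard1(float q[], int nx) {
   q[0] += q[nx];
   q[nx] = 0.0f;
}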
/*--------------------------------------------------------------------*/
void cmpois33(float complex q[], float complex fxyz[], int isign,
float complex ffc[], float ax, float ay, float az,
float affp, float *we, int nx, int ny, int nz, int nxvh,
int nyv, int nzv, int nxhd, int nyhd, int nzhd) {
/* this subroutine solves 3d poisson's equation in fourier space for
force/charge (or convolution of electric field over particle shape)
with periodic boundary conditions.
for isign = 0, output: ffc
input: isign,ax,ay,az,affp,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
for isign = -1, output: fxyz, we
input: q,ffc,isign,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
approximate flop count is:
59*nxc*nyc*nzc + 26*(nxc*nyc + nxc*nzc + nyc*nzc)
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
if isign = 0, form factor array is prepared
if isign is not equal to 0, force/charge is calculated
equation used is:
fx[kz][ky][kx] = -sqrt(-1)*kx*g[kz][ky][kx]*s[kz][ky][kx],
fy[kz][ky][kx] = -sqrt(-1)*ky*g[kz][ky][kx]*s[kz][ky][kx],
fz[kz][ky][kx] = -sqrt(-1)*kz*g[kz][ky][kx]*s[kz][ky][kx],
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
j,k,l = fourier mode numbers,
g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx],
s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for
fx(kx=pi) = fy(kx=pi) = fz(kx=pi) = 0,
fx(ky=pi) = fy(ky=pi) = fz(ky=pi) = 0,
fx(kz=pi) = fy(kz=pi) = fz(kz=pi) = 0,
fx(kx=0,ky=0,kz=0) = fy(kx=0,ky=0,kz=0) = fz(kx=0,ky=0,kz=0) = 0.
q[l][k][j] = complex charge density for fourier mode (j,k,l)
fxyz[l][k][j][0] = x component of complex force/charge
fxyz[l][k][j][1] = y component of complex force/charge
fxyz[l][k][j][2] = z component of complex force/charge
all for fourier mode (j,k,l)
cimag(ffc[l][k][j]) = finite-size particle shape factor s
for fourier mode (j,k,l)
creal(ffc[l][k][j]) = potential green's function g
for fourier mode (j,k,l)
ax/ay/az = half-width of particle in x/y/z direction
affp = normalization constant = nx*ny*nz/np,
where np=number of particles
electric field energy is also calculated, using
we = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*
|q[kz][ky][kx]*s[kz][ky][kx]|**2)
nx/ny/nz = system length in x/y/z direction
nxvh = first dimension of field arrays, must be >= nxh
nyv = second dimension of field arrays, must be >= ny
nzv = third dimension of field arrays, must be >= nz
nxhd = first dimension of form factor array, must be >= nxh
nyhd = second dimension of form factor array, must be >= nyh
nzhd = third dimension of form factor array, must be >= nzh
local data */
int nxh, nyh, nzh, j, k, l, k1, l1, kk, kj, ll, lj, nxyhd, nxvyh;
float dnx, dny, dnz, dkx, dky, dkz, at1, at2, at3, at4, at5, at6;
float complex zero, zt1, zt2;
double wp, sum1, sum2;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
zero = 0.0 + 0.0*_Complex_I;
if (isign != 0)
goto L40;
/* prepare form factor array */
for (l = 0; l < nzh; l++) {
dkz = dnz*(float) l;
ll = nxyhd*l;
at1 = dkz*dkz;
at2 = pow((dkz*az),2);
for (k = 0; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
at3 = dky*dky + at1;
at4 = pow((dky*ay),2) + at2;
for (j = 0; j < nxh; j++) {
dkx = dnx*(float) j;
at5 = dkx*dkx + at3;
at6 = exp(-0.5*(pow((dkx*ax),2) + at4));
if (at5==0.0) {
ffc[j+kk+ll] = affp + 1.0*_Complex_I;
}
else {
ffc[j+kk+ll] = (affp*at6/at5) + at6*_Complex_I;
}
}
}
}
return;
/* calculate force/charge and sum field energy */
L40: sum1 = 0.0;
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,wp) \
reduction(+:sum1)
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
wp = 0.0;
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
for (j = 1; j < nxh; j++) {
at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
at4 = dkz*at1;
zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I;
zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I;
fxyz[3*(j+kj+lj)] = at2*zt1;
fxyz[1+3*(j+kj+lj)] = at3*zt1;
fxyz[2+3*(j+kj+lj)] = at4*zt1;
fxyz[3*(j+k1+lj)] = at2*zt2;
fxyz[1+3*(j+k1+lj)] = -at3*zt2;
fxyz[2+3*(j+k1+lj)] = at4*zt2;
zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I;
zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I;
fxyz[3*(j+kj+l1)] = at2*zt1;
fxyz[1+3*(j+kj+l1)] = at3*zt1;
fxyz[2+3*(j+kj+l1)] = -at4*zt1;
fxyz[3*(j+k1+l1)] = at2*zt2;
fxyz[1+3*(j+k1+l1)] = -at3*zt2;
fxyz[2+3*(j+k1+l1)] = -at4*zt2;
wp += at1*(q[j+kj+lj]*conjf(q[j+kj+lj])
+ q[j+k1+lj]*conjf(q[j+k1+lj])
+ q[j+kj+l1]*conjf(q[j+kj+l1])
+ q[j+k1+l1]*conjf(q[j+k1+l1]));
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = crealf(ffc[kk+ll])*cimagf(ffc[kk+ll]);
at3 = at1*dny*(float) k;
at4 = dkz*at1;
zt1 = cimagf(q[kj+lj]) - crealf(q[kj+lj])*_Complex_I;
zt2 = cimagf(q[kj+l1]) - crealf(q[kj+l1])*_Complex_I;
fxyz[3*(kj+lj)] = zero;
fxyz[1+3*(kj+lj)] = at3*zt1;
fxyz[2+3*(kj+lj)] = at4*zt1;
fxyz[3*(k1+lj)] = zero;
fxyz[1+3*(k1+lj)] = zero;
fxyz[2+3*(k1+lj)] = zero;
fxyz[3*(kj+l1)] = zero;
fxyz[1+3*(kj+l1)] = at3*zt2;
fxyz[2+3*(kj+l1)] = -at4*zt2;
fxyz[3*(k1+l1)] = zero;
fxyz[1+3*(k1+l1)] = zero;
fxyz[2+3*(k1+l1)] = zero;
wp += at1*(q[kj+lj]*conjf(q[kj+lj])
+ q[kj+l1]*conjf(q[kj+l1]));
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
for (j = 1; j < nxh; j++) {
at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]);
at2 = at1*dnx*(float) j;
at4 = dkz*at1;
zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I;
zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I;
fxyz[3*(j+lj)] = at2*zt1;
fxyz[1+3*(j+lj)] = zero;
fxyz[2+3*(j+lj)] = at4*zt1;
fxyz[3*(j+k1+lj)] = zero;
fxyz[1+3*(j+k1+lj)] = zero;
fxyz[2+3*(j+k1+lj)] = zero;
fxyz[3*(j+l1)] = at2*zt2;
fxyz[1+3*(j+l1)] = zero;
fxyz[2+3*(j+l1)] = -at4*zt2;
fxyz[3*(j+k1+l1)] = zero;
fxyz[1+3*(j+k1+l1)] = zero;
fxyz[2+3*(j+k1+l1)] = zero;
wp += at1*(q[j+lj]*conjf(q[j+lj])
+ q[j+l1]*conjf(q[j+l1]));
}
/* mode numbers kx = 0, nx/2 */
at1 = crealf(ffc[ll])*cimagf(ffc[ll]);
at4 = dkz*at1;
zt1 = cimagf(q[lj]) - crealf(q[lj])*_Complex_I;
fxyz[3*lj] = zero;
fxyz[1+3*lj] = zero;
fxyz[2+3*lj] = at4*zt1;
fxyz[3*(k1+lj)] = zero;
fxyz[1+3*(k1+lj)] = zero;
fxyz[2+3*(k1+lj)] = zero;
fxyz[3*l1] = zero;
fxyz[1+3*l1] = zero;
fxyz[2+3*l1] = zero;
fxyz[3*(k1+l1)] = zero;
fxyz[1+3*(k1+l1)] = zero;
fxyz[2+3*(k1+l1)] = zero;
wp += at1*(q[lj]*conjf(q[lj]));
sum1 += wp;
}
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
sum2 = 0.0;
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \
reduction(+:sum2)
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
wp = 0.0;
for (j = 1; j < nxh; j++) {
at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I;
zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I;
fxyz[3*(j+kj)] = at2*zt1;
fxyz[1+3*(j+kj)] = at3*zt1;
fxyz[2+3*(j+kj)] = zero;
fxyz[3*(j+k1)] = at2*zt2;
fxyz[1+3*(j+k1)] = -at3*zt2;
fxyz[2+3*(j+k1)] = zero;
fxyz[3*(j+kj+l1)] = zero;
fxyz[1+3*(j+kj+l1)] = zero;
fxyz[2+3*(j+kj+l1)] = zero;
fxyz[3*(j+k1+l1)] = zero;
fxyz[1+3*(j+k1+l1)] = zero;
fxyz[2+3*(j+k1+l1)] = zero;
wp += at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1]));
}
/* mode numbers kx = 0, nx/2 */
at1 = crealf(ffc[kk])*cimagf(ffc[kk]);
at3 = at1*dny*(float) k;
zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I;
fxyz[3*kj] = zero;
fxyz[1+3*kj] = at3*zt1;
fxyz[2+3*kj] = zero;
fxyz[3*k1] = zero;
fxyz[1+3*k1] = zero;
fxyz[2+3*k1] = zero;
fxyz[3*(kj+l1)] = zero;
fxyz[1+3*(kj+l1)] = zero;
fxyz[2+3*(kj+l1)] = zero;
fxyz[3*(k1+l1)] = zero;
fxyz[1+3*(k1+l1)] = zero;
fxyz[2+3*(k1+l1)] = zero;
wp += at1*(q[kj]*conjf(q[kj]));
sum2 += wp;
}
wp = 0.0;
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
for (j = 1; j < nxh; j++) {
at1 = crealf(ffc[j])*cimagf(ffc[j]);
at2 = at1*dnx*(float) j;
zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I;
fxyz[3*j] = at2*zt1;
fxyz[1+3*j] = zero;
fxyz[2+3*j] = zero;
fxyz[3*(j+k1)] = zero;
fxyz[1+3*(j+k1)] = zero;
fxyz[2+3*(j+k1)] = zero;
fxyz[3*(j+l1)] = zero;
fxyz[1+3*(j+l1)] = zero;
fxyz[2+3*(j+l1)] = zero;
fxyz[3*(j+k1+l1)] = zero;
fxyz[1+3*(j+k1+l1)] = zero;
fxyz[2+3*(j+k1+l1)] = zero;
wp += at1*(q[j]*conjf(q[j]));
}
fxyz[0] = zero;
fxyz[1] = zero;
fxyz[2] = zero;
fxyz[3*k1] = zero;
fxyz[1+3*k1] = zero;
fxyz[2+3*k1] = zero;
fxyz[3*l1] = zero;
fxyz[1+3*l1] = zero;
fxyz[2+3*l1] = zero;
fxyz[3*(k1+l1)] = zero;
fxyz[1+3*(k1+l1)] = zero;
fxyz[2+3*(k1+l1)] = zero;
*we = (sum1 + sum2 + wp)*((float) nx)*((float) ny)*((float) nz);
return;
}
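/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the form
   factor stored in ffc by cmpois33 for a single fourier mode, taken
   from the g and s formulas in its header comment (real part = green's
   function, imaginary part = shape factor).  example_form_factor is
   hypothetical and assumes <math.h> and <complex.h> are available. */
static float complex example_form_factor(float kx, float ky, float kz,
                                         float ax, float ay, float az,
                                         float affp) {
   float k2 = kx*kx + ky*ky + kz*kz;
/* s = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2) */
   float s = expf(-0.5f*(kx*kx*ax*ax + ky*ky*ay*ay + kz*kz*az*az));
   if (k2==0.0f)
      return affp + 1.0f*_Complex_I;
   return (affp*s/k2) + s*_Complex_I;
}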
/*--------------------------------------------------------------------*/
void cwfft3rinit(int mixup[], float complex sct[], int indx, int indy,
int indz, int nxhyzd, int nxyzhd) {
/* this subroutine calculates tables needed by a three dimensional
real to complex fast fourier transform and its inverse.
input: indx, indy, indz, nxhyzd, nxyzhd
output: mixup, sct
mixup = array of bit reversed addresses
sct = sine/cosine table
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = one half of maximum of (nx,ny,nz)
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, ny, nz, nxyz, nxhyz, nxyzh;
int j, k, lb, ll, jb, it;
float dnxyz, arg;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
ny = 1L<<indy;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
/* bit-reverse index table: mixup[j] = 1 + reversed bits of j */
for (j = 0; j < nxhyz; j++) {
lb = j;
ll = 0;
for (k = 0; k < ndx1yz; k++) {
jb = lb/2;
it = lb - 2*jb;
lb = jb;
ll = 2*ll + it;
}
mixup[j] = ll + 1;
}
/* sine/cosine table for the angles 2*n*pi/nxyz */
nxyzh = nxyz/2;
dnxyz = 6.28318530717959/(float) nxyz;
for (j = 0; j < nxyzh; j++) {
arg = dnxyz*(float) j;
sct[j] = cosf(arg) - sinf(arg)*_Complex_I;
}
return;
}
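/*--------------------------------------------------------------------*/
/* illustrative sketch, not part of the original library: the bit
   reversal computed per index by cwfft3rinit, as a standalone helper
   (cwfft3rinit stores the result plus one in mixup).
   example_bit_reverse is hypothetical. */
static int example_bit_reverse(int j, int nbits) {
   int k, ll = 0;
   for (k = 0; k < nbits; k++) {
      ll = 2*ll + (j & 1);  /* append lowest remaining bit */
      j >>= 1;
   }
   return ll;
}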
/*--------------------------------------------------------------------*/
void cfft3rmxy(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nzi, int nzp, int nxhd, int nyd, int nzd, int nxhyzd,
int nxyzhd) {
/* this subroutine performs the x-y part of a three dimensional real to
complex fast fourier transform and its inverse, for a subset of z,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, an inverse fourier transform in x and y is performed
f[i][m][n] = (1/(nx*ny*nz))*sum(f[i][k][j]*exp(-sqrt(-1)*2pi*n*j/nx)*
exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, a forward fourier transform in x and y is performed
f[l][k][j] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*n*j/nx)*
exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nzi = initial z index used
nzp = number of z indices used
nxhd = first dimension of f
nyd,nzd = second and third dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0] = real, imaginary part of mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhyd;
int i, j, k, l, n, nn, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
float ani;
float complex t1, t2, t3;
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nzt = nzi + nzp - 1;
nxhyd = nxhd*nyd;
if (isign > 0)
goto L180;
/* inverse fourier transform */
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
nryb = nxhyz/ny;
nry = nxyz/ny;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,nn,joff,ani,t1,t2,t3)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t1 = f[j1+joff];
f[j1+joff] = f[j+joff];
f[j+joff] = t1;
}
}
}
/* first transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t2 = t1*f[j2+joff];
f[j2+joff] = f[j1+joff] - t2;
f[j1+joff] += t2;
}
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxyz/nx;
ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
for (j = 1; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
t2 = conjf(f[nxh-j+joff]);
t1 = f[j+joff] + t2;
t2 = (f[j+joff] - t2)*t3;
f[j+joff] = ani*(t1 + t2);
f[nxh-j+joff] = ani*conjf(t1 - t2);
}
}
ani = 2.0*ani;
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
f[nxhh+joff] = ani*conjf(f[nxhh+joff]);
f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff]))
+ (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd*k1 + nn;
for (i = 0; i < nxh; i++) {
t1 = f[i+k1];
f[i+k1] = f[i+joff];
f[i+joff] = t1;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd*(j + k1) + nn;
j2 = nxhd*(j + k2) + nn;
t1 = sct[kmr*j];
for (i = 0; i < nxh; i++) {
t2 = t1*f[i+j2];
f[i+j2] = f[i+j1] - t2;
f[i+j1] += t2;
}
}
}
ns = ns2;
}
/* unscramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd*k;
k1 = nxhd*ny - joff + nn;
joff += nn;
t1 = f[k1];
f[k1] = 0.5*(cimagf(f[joff] + t1)
+ crealf(f[joff] - t1)*_Complex_I);
f[joff] = 0.5*(crealf(f[joff] + t1)
+ cimagf(f[joff] - t1)*_Complex_I);
}
}
return;
/* forward fourier transform */
L180: nryb = nxhyz/ny;
nry = nxyz/ny;
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,nn,joff,t1,t2,t3)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd*k;
k1 = nxhd*ny - joff + nn;
joff += nn;
t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
f[k1] = conjf(f[joff] - t1);
f[joff] += t1;
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd*k1 + nn;
for (i = 0; i < nxh; i++) {
t1 = f[i+k1];
f[i+k1] = f[i+joff];
f[i+joff] = t1;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd*(j + k1) + nn;
j2 = nxhd*(j + k2) + nn;
t1 = conjf(sct[kmr*j]);
for (i = 0; i < nxh; i++) {
t2 = t1*f[i+j2];
f[i+j2] = f[i+j1] - t2;
f[i+j1] += t2;
}
}
}
ns = ns2;
}
/* scramble coefficients */
kmr = nxyz/nx;
for (j = 1; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
t2 = conjf(f[nxh-j+joff]);
t1 = f[j+joff] + t2;
t2 = (f[j+joff] - t2)*t3;
f[j+joff] = t1 + t2;
f[nxh-j+joff] = conjf(t1 - t2);
}
}
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]);
f[joff] = (crealf(f[joff]) + cimagf(f[joff]))
+ (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I;
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t1 = f[j1+joff];
f[j1+joff] = f[j+joff];
f[j+joff] = t1;
}
}
}
/* finally transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = j + k1;
j2 = j + k2;
t1 = conjf(sct[kmr*j]);
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t2 = t1*f[j2+joff];
f[j2+joff] = f[j1+joff] - t2;
f[j1+joff] += t2;
}
}
}
ns = ns2;
}
}
return;
}
/*--------------------------------------------------------------------*/
void cfft3rmxz(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nyi, int nyp, int nxhd, int nyd, int nzd, int nxhyzd,
int nxyzhd) {
/* this subroutine performs the z part of a three dimensional real to
complex fast fourier transform and its inverse, for a subset of y,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, an inverse fourier transform in z is performed
f[l][k][j] = sum(f[i][k][j]*exp(-sqrt(-1)*2pi*l*i/nz))
if isign = 1, a forward fourier transform in z is performed
f[i][m][n] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*l*i/nz))
mixup = array of bit reversed addresses
sct = sine/cosine table
nyi = initial y index used
nyp = number of y indices used
nxhd = first dimension of f
nyd,nzd = second and third dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0] = real, imaginary part of mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, ny, nyh;
int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhyd, ioff;
int i, j, k, l, n, ll, j1, j2, k1, k2, l1, ns, ns2, km, kmr, i0, i1;
float complex t1, t2;
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nzh = nz/2;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nyt = nyi + nyp - 1;
nxhyd = nxhd*nyd;
if (isign > 0)
goto L90;
/* inverse fourier transform */
nrzb = nxhyz/nz;
nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd*n;
/* bit-reverse array elements in z */
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
for (i = 0; i < nxh; i++) {
t1 = f[i+i1];
f[i+i1] = f[i+i0];
f[i+i0] = t1;
}
}
}
/* finally transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = sct[kmr*j];
i0 = ioff + j1;
i1 = ioff + j2;
for (i = 0; i < nxh; i++) {
t2 = t1*f[i+i1];
f[i+i1] = f[i+i0] - t2;
f[i+i0] += t2;
}
}
}
ns = ns2;
}
}
/* unscramble modes kx = 0, nx/2 */
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
if (nyi==1) {
t1 = f[l1];
f[l1] = 0.5*(cimagf(f[ll] + t1)
+ crealf(f[ll] - t1)*_Complex_I);
f[ll] = 0.5*(crealf(f[ll] + t1)
+ cimagf(f[ll] - t1)*_Complex_I);
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
i1 = nxhd*nyh;
i0 = i1 + ll;
i1 += l1;
t1 = f[i1];
f[i1] = 0.5*(cimagf(f[i0] + t1)
+ crealf(f[i0] - t1)*_Complex_I);
f[i0] = 0.5*(crealf(f[i0] + t1)
+ cimagf(f[i0] - t1)*_Complex_I);
}
}
return;
/* forward fourier transform */
L90: nrzb = nxhyz/nz;
nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
if (nyi==1) {
t1 = cimagf(f[l1]) + crealf(f[l1])*_Complex_I;
f[l1] = conjf(f[ll] - t1);
f[ll] += t1;
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
i1 = nxhd*nyh;
i0 = i1 + ll;
i1 += l1;
t1 = cimagf(f[i1]) + crealf(f[i1])*_Complex_I;
f[i1] = conjf(f[i0] - t1);
f[i0] += t1;
}
}
/* bit-reverse array elements in z */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd*n;
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
for (i = 0; i < nxh; i++) {
t1 = f[i+i1];
f[i+i1] = f[i+i0];
f[i+i0] = t1;
}
}
}
/* first transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = conjf(sct[kmr*j]);
i0 = ioff + j1;
i1 = ioff + j2;
for (i = 0; i < nxh; i++) {
t2 = t1*f[i+i1];
f[i+i1] = f[i+i0] - t2;
f[i+i0] += t2;
}
}
}
ns = ns2;
}
}
return;
}
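/* Illustrative sketch, not from the original source: the packed storage
   documented above leaves no slot of its own for the real nyquist mode
   kx = nx/2, so it rides in the imaginary part of the kx = 0 word.  For
   the (0,0,0) corner after an inverse transform: */
static float example_packed_nyquist_mode(float complex f[])
{
/* real part of mode nx/2,0,0 per the jpl convention */
   return cimagf(f[0]);
}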
/*--------------------------------------------------------------------*/
void cfft3rm3xy(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nzi, int nzp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of 3 three dimensional complex
to real fast fourier transforms and their inverses, for a subset of z,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, three inverse fourier transforms in x and y are
performed
f[i][m][n][0:2] = (1/(nx*ny*nz))*sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*n*j/nx)
*exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, three forward fourier transforms in x and y are
performed
f[l][k][j][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nzi = initial z index used
nzp = number of z indices used
nxhd = second dimension of f
nyd,nzd = third and fourth dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j][0:2] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0][0:2] = real, imaginary part of mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhd3, nxhyd;
int i, j, k, l, n, nn, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
float at1, at2, ani;
float complex t1, t2, t3, t4;
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nzt = nzi + nzp - 1;
nxhd3 = 3*nxhd;
nxhyd = nxhd3*nyd;
if (isign > 0)
goto L230;
/* inverse fourier transform */
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
nryb = nxhyz/ny;
nry = nxyz/ny;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2,ani,t1, \
t2,t3,t4)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* swap complex components */
for (i = 0; i < ny; i++) {
joff = nxhd3*i + nn;
for (j = 0; j < nxh; j++) {
at1 = crealf(f[2+3*j+joff]);
f[2+3*j+joff] = crealf(f[1+3*j+joff])
+ cimagf(f[2+3*j+joff])*_Complex_I;
at2 = cimagf(f[1+3*j+joff]);
f[1+3*j+joff] = cimagf(f[3*j+joff]) + at1*_Complex_I;
f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I;
}
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd3*i + nn;
t1 = f[3*j1+joff];
t2 = f[1+3*j1+joff];
t3 = f[2+3*j1+joff];
f[3*j1+joff] = f[3*j+joff];
f[1+3*j1+joff] = f[1+3*j+joff];
f[2+3*j1+joff] = f[2+3*j+joff];
f[3*j+joff] = t1;
f[1+3*j+joff] = t2;
f[2+3*j+joff] = t3;
}
}
}
/* first transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
for (i = 0; i < ny; i++) {
joff = nxhd3*i + nn;
t2 = t1*f[3*j2+joff];
t3 = t1*f[1+3*j2+joff];
t4 = t1*f[2+3*j2+joff];
f[3*j2+joff] = f[3*j1+joff] - t2;
f[1+3*j2+joff] = f[1+3*j1+joff] - t3;
f[2+3*j2+joff] = f[2+3*j1+joff] - t4;
f[3*j1+joff] += t2;
f[1+3*j1+joff] += t3;
f[2+3*j1+joff] += t4;
}
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxyz/nx;
ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
for (j = 1; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
for (k = 0; k < ny; k++) {
joff = nxhd3*k + nn;
for (jj = 0; jj < 3; jj++) {
t2 = conjf(f[jj+3*(nxh-j)+joff]);
t1 = f[jj+3*j+joff] + t2;
t2 = (f[jj+3*j+joff] - t2)*t3;
f[jj+3*j+joff] = ani*(t1 + t2);
f[jj+3*(nxh-j)+joff] = ani*conjf(t1 - t2);
}
}
}
ani = 2.0*ani;
for (k = 0; k < ny; k++) {
joff = nxhd3*k + nn;
for (jj = 0; jj < 3; jj++) {
f[jj+3*nxhh+joff] = ani*conjf(f[jj+3*nxhh+joff]);
f[jj+joff] = ani*((crealf(f[jj+joff])
+ cimagf(f[jj+joff]))
+ (crealf(f[jj+joff])
- cimagf(f[jj+joff]))*_Complex_I);
}
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd3*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd3*k1 + nn;
for (i = 0; i < nxh; i++) {
t1 = f[3*i+k1];
t2 = f[1+3*i+k1];
t3 = f[2+3*i+k1];
f[3*i+k1] = f[3*i+joff];
f[1+3*i+k1] = f[1+3*i+joff];
f[2+3*i+k1] = f[2+3*i+joff];
f[3*i+joff] = t1;
f[1+3*i+joff] = t2;
f[2+3*i+joff] = t3;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd3*(j + k1) + nn;
j2 = nxhd3*(j + k2) + nn;
t1 = sct[kmr*j];
for (i = 0; i < nxh; i++) {
t2 = t1*f[3*i+j2];
t3 = t1*f[1+3*i+j2];
t4 = t1*f[2+3*i+j2];
f[3*i+j2] = f[3*i+j1] - t2;
f[1+3*i+j2] = f[1+3*i+j1] - t3;
f[2+3*i+j2] = f[2+3*i+j1] - t4;
f[3*i+j1] += t2;
f[1+3*i+j1] += t3;
f[2+3*i+j1] += t4;
}
}
}
ns = ns2;
}
/* unscramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd3*k;
k1 = nxhd3*ny - joff + nn;
joff += nn;
for (jj = 0; jj < 3; jj++) {
t1 = f[jj+k1];
f[jj+k1] = 0.5*(cimagf(f[jj+joff] + t1)
+ crealf(f[jj+joff] - t1)*_Complex_I);
f[jj+joff] = 0.5*(crealf(f[jj+joff] + t1)
+ cimagf(f[jj+joff] - t1)*_Complex_I);
}
}
}
return;
/* forward fourier transform */
L230: nryb = nxhyz/ny;
nry = nxyz/ny;
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2,t1,t2, \
t3,t4)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd3*k;
k1 = nxhd3*ny - joff + nn;
joff += nn;
for (jj = 0; jj < 3; jj++) {
t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I;
f[jj+k1] = conjf(f[jj+joff] - t1);
f[jj+joff] += t1;
}
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd3*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd3*k1 + nn;
for (i = 0; i < nxh; i++) {
t1 = f[3*i+k1];
t2 = f[1+3*i+k1];
t3 = f[2+3*i+k1];
f[3*i+k1] = f[3*i+joff];
f[1+3*i+k1] = f[1+3*i+joff];
f[2+3*i+k1] = f[2+3*i+joff];
f[3*i+joff] = t1;
f[1+3*i+joff] = t2;
f[2+3*i+joff] = t3;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd3*(j + k1) + nn;
j2 = nxhd3*(j + k2) + nn;
t1 = conjf(sct[kmr*j]);
for (i = 0; i < nxh; i++) {
t2 = t1*f[3*i+j2];
t3 = t1*f[1+3*i+j2];
t4 = t1*f[2+3*i+j2];
f[3*i+j2] = f[3*i+j1] - t2;
f[1+3*i+j2] = f[1+3*i+j1] - t3;
f[2+3*i+j2] = f[2+3*i+j1] - t4;
f[3*i+j1] += t2;
f[1+3*i+j1] += t3;
f[2+3*i+j1] += t4;
}
}
}
ns = ns2;
}
/* scramble coefficients */
kmr = nxyz/nx;
for (j = 1; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
for (k = 0; k < ny; k++) {
joff = nxhd3*k + nn;
for (jj = 0; jj < 3; jj++) {
t2 = conjf(f[jj+3*(nxh-j)+joff]);
t1 = f[jj+3*j+joff] + t2;
t2 = (f[jj+3*j+joff] - t2)*t3;
f[jj+3*j+joff] = t1 + t2;
f[jj+3*(nxh-j)+joff] = conjf(t1 - t2);
}
}
}
for (k = 0; k < ny; k++) {
joff = nxhd3*k + nn;
for (jj = 0; jj < 3; jj++) {
f[jj+3*nxhh+joff] = 2.0*conjf(f[jj+3*nxhh+joff]);
f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff]))
+ (crealf(f[jj+joff])
- cimagf(f[jj+joff]))*_Complex_I;
}
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd3*i + nn;
t1 = f[3*j1+joff];
t2 = f[1+3*j1+joff];
t3 = f[2+3*j1+joff];
f[3*j1+joff] = f[3*j+joff];
f[1+3*j1+joff] = f[1+3*j+joff];
f[2+3*j1+joff] = f[2+3*j+joff];
f[3*j+joff] = t1;
f[1+3*j+joff] = t2;
f[2+3*j+joff] = t3;
}
}
}
/* finally transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = j + k1;
j2 = j + k2;
t1 = conjf(sct[kmr*j]);
for (i = 0; i < ny; i++) {
joff = nxhd3*i + nn;
t2 = t1*f[3*j2+joff];
t3 = t1*f[1+3*j2+joff];
t4 = t1*f[2+3*j2+joff];
f[3*j2+joff] = f[3*j1+joff] - t2;
f[1+3*j2+joff] = f[1+3*j1+joff] - t3;
f[2+3*j2+joff] = f[2+3*j1+joff] - t4;
f[3*j1+joff] += t2;
f[1+3*j1+joff] += t3;
f[2+3*j1+joff] += t4;
}
}
}
ns = ns2;
}
/* swap complex components */
for (i = 0; i < ny; i++) {
joff = nxhd3*i + nn;
for (j = 0; j < nxh; j++) {
at1 = crealf(f[2+3*j+joff]);
f[2+3*j+joff] = cimagf(f[1+3*j+joff])
+ cimagf(f[2+3*j+joff])*_Complex_I;
at2 = crealf(f[1+3*j+joff]);
f[1+3*j+joff] = at1 + cimagf(f[3*j+joff])*_Complex_I;
f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I;
}
}
}
return;
}
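/* Illustrative accessor, not from the original source: in the 3-component
   routines above, component jj (0..2) of grid point (j,k,l) is interleaved
   at f[jj + 3*j + nxhd3*k + nxhyd*l], with nxhd3 = 3*nxhd and
   nxhyd = nxhd3*nyd as computed inside cfft3rm3xy. */
static float complex example_vector_element(float complex f[], int jj,
                                            int j, int k, int l, int nxhd,
                                            int nyd)
{
   int nxhd3 = 3*nxhd;
   return f[jj+3*j+nxhd3*k+(nxhd3*nyd)*l];
}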
/*--------------------------------------------------------------------*/
void cfft3rm3z(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nyi, int nyp, int nxhd, int nyd, int nzd, int nxhyzd,
int nxyzhd) {
/* this subroutine performs the z part of 3 three dimensional complex to
real fast fourier transforms and their inverses, for a subset of y,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, three inverse fourier transforms in z are performed
f[l][k][j][0:2] = sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*l*i/nz))
if isign = 1, three forward fourier transforms in z are performed
f[i][m][n][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*l*i/nz))
mixup = array of bit reversed addresses
sct = sine/cosine table
nyi = initial y index used
nyp = number of y indices used
nxhd = second dimension of f
nyd,nzd = third and fourth dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j][0:2] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0][0:2] = real, imaginary part of mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, ny, nyh;
int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhd3, nxhyd, ioff;
int i, j, k, l, n, ll, jj, j1, j2, k1, k2, l1, ns, ns2, km, kmr;
int i0, i1;
float complex t1, t2, t3, t4;
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nzh = nz/2;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nyt = nyi + nyp - 1;
nxhd3 = 3*nxhd;
nxhyd = nxhd3*nyd;
if (isign > 0)
goto L110;
/* inverse fourier transform */
nrzb = nxhyz/nz;
nrz = nxyz/nz;
/* bit-reverse array elements in z */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd3*n;
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
for (i = 0; i < nxh; i++) {
t1 = f[3*i+i1];
t2 = f[1+3*i+i1];
t3 = f[2+3*i+i1];
f[3*i+i1] = f[3*i+i0];
f[1+3*i+i1] = f[1+3*i+i0];
f[2+3*i+i1] = f[2+3*i+i0];
f[3*i+i0] = t1;
f[1+3*i+i0] = t2;
f[2+3*i+i0] = t3;
}
}
}
/* finally transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = sct[kmr*j];
i0 = ioff + j1;
i1 = ioff + j2;
for (i = 0; i < nxh; i++) {
t2 = t1*f[3*i+i1];
t3 = t1*f[1+3*i+i1];
t4 = t1*f[2+3*i+i1];
f[3*i+i1] = f[3*i+i0] - t2;
f[1+3*i+i1] = f[1+3*i+i0] - t3;
f[2+3*i+i1] = f[2+3*i+i0] - t4;
f[3*i+i0] += t2;
f[1+3*i+i0] += t3;
f[2+3*i+i0] += t4;
}
}
}
ns = ns2;
}
}
/* unscramble modes kx = 0, nx/2 */
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
if (nyi==1) {
for (jj = 0; jj < 3; jj++) {
t1 = f[jj+l1];
f[jj+l1] = 0.5*(cimagf(f[jj+ll] + t1)
+ crealf(f[jj+ll] - t1)*_Complex_I);
f[jj+ll] = 0.5*(crealf(f[jj+ll] + t1)
+ cimagf(f[jj+ll] - t1)*_Complex_I);
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (jj = 0; jj < 3; jj++) {
i1 = nxhd3*nyh;
i0 = i1 + ll;
i1 += l1;
t1 = f[jj+i1];
f[jj+i1] = 0.5*(cimagf(f[jj+i0] + t1)
+ crealf(f[jj+i0] - t1)*_Complex_I);
f[jj+i0] = 0.5*(crealf(f[jj+i0] + t1)
+ cimagf(f[jj+i0] - t1)*_Complex_I);
}
}
}
return;
/* forward fourier transform */
L110: nrzb = nxhyz/nz;
nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
if (nyi==1) {
for (jj = 0; jj < 3; jj++) {
t1 = cimagf(f[jj+l1]) + crealf(f[jj+l1])*_Complex_I;
f[jj+l1] = conjf(f[jj+ll] - t1);
f[jj+ll] += t1;
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (jj = 0; jj < 3; jj++) {
i1 = nxhd3*nyh;
i0 = i1 + ll;
i1 += l1;
t1 = cimagf(f[jj+i1]) + crealf(f[jj+i1])*_Complex_I;
f[jj+i1] = conjf(f[jj+i0] - t1);
f[jj+i0] += t1;
}
}
}
/* bit-reverse array elements in z */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd3*n;
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
for (i = 0; i < nxh; i++) {
t1 = f[3*i+i1];
t2 = f[1+3*i+i1];
t3 = f[2+3*i+i1];
f[3*i+i1] = f[3*i+i0];
f[1+3*i+i1] = f[1+3*i+i0];
f[2+3*i+i1] = f[2+3*i+i0];
f[3*i+i0] = t1;
f[1+3*i+i0] = t2;
f[2+3*i+i0] = t3;
}
}
}
/* first transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = conjf(sct[kmr*j]);
i0 = ioff + j1;
i1 = ioff + j2;
for (i = 0; i < nxh; i++) {
t2 = t1*f[3*i+i1];
t3 = t1*f[1+3*i+i1];
t4 = t1*f[2+3*i+i1];
f[3*i+i1] = f[3*i+i0] - t2;
f[1+3*i+i1] = f[1+3*i+i0] - t3;
f[2+3*i+i1] = f[2+3*i+i0] - t4;
f[3*i+i0] += t2;
f[1+3*i+i0] += t3;
f[2+3*i+i0] += t4;
}
}
}
ns = ns2;
}
}
return;
}
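/* Note on the parallel structure: the z-stage routines above distribute
   independent y rows (index n) across OpenMP threads, while the xy-stage
   routines distribute independent z planes.  Splitting the 3d transform
   into these two stages lets each stage parallelize over its untransformed
   dimension; the wrappers below simply sequence the stages. */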
/*--------------------------------------------------------------------*/
void cwfft3rmx(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for real to complex fft, with packed data */
/* local data */
int ny, nz;
static int nyi = 1, nzi = 1;
/* calculate range of indices */
ny = 1L<<indy;
nz = 1L<<indz;
/* inverse fourier transform */
if (isign < 0) {
/* perform xy fft */
cfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform z fft */
cfft3rmxz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
/* forward fourier transform */
else if (isign > 0) {
/* perform z fft */
cfft3rmxz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform xy fft */
cfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
return;
}
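/* Usage sketch with illustrative names, under the assumption that mixup
   and sct were sized to nxhyzd and nxyzhd entries and initialized once by
   cwfft3rinit (defined earlier in this file): */
static void example_fft_round_trip(float complex f[], int mixup[],
                                   float complex sct[], int indx, int indy,
                                   int indz, int nxhd, int nyd, int nzd,
                                   int nxhyzd, int nxyzhd)
{
   cwfft3rinit(mixup,sct,indx,indy,indz,nxhyzd,nxyzhd);
/* to fourier space */
   cwfft3rmx(f,-1,mixup,sct,indx,indy,indz,nxhd,nyd,nzd,nxhyzd,nxyzhd);
/* ... operate on the packed fourier coefficients here ... */
/* back to real space; the isign = -1 pass already applied the
   1/(nx*ny*nz) normalization */
   cwfft3rmx(f,1,mixup,sct,indx,indy,indz,nxhd,nyd,nzd,nxhyzd,nxyzhd);
}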
/*--------------------------------------------------------------------*/
void cwfft3rm3(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for 3 3d real to complex ffts, with packed data */
/* parallelized with OpenMP */
/* local data */
int ny, nz;
static int nyi = 1, nzi = 1;
/* calculate range of indices */
ny = 1L<<indy;
nz = 1L<<indz;
/* inverse fourier transform */
if (isign < 0) {
/* perform xy fft */
cfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform z fft */
cfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
/* forward fourier transform */
else if (isign > 0) {
/* perform z fft */
cfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform xy fft */
cfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
return;
}
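/* cwfft3rm3 is called exactly like cwfft3rmx above, but f holds a
   3-component field in the interleaved layout described in cfft3rm3xy;
   e.g. an inverse transform of a force field would be
   cwfft3rm3(fxyz,-1,mixup,sct,indx,indy,indz,nxhd,nyd,nzd,nxhyzd,nxyzhd); */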
/* Interfaces to Fortran */
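/* Fortran passes every argument by reference, so each shim below accepts
   only pointers, dereferences the scalar arguments, and forwards them to
   the corresponding C routine; array and output arguments pass through
   unchanged.  The trailing underscore in each name matches the default
   external-symbol convention of common Fortran compilers. */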
/*--------------------------------------------------------------------*/
void cdistr3_(float *part, float *vtx, float *vty, float *vtz,
float *vdx, float *vdy, float *vdz, int *npx, int *npy,
int *npz, int *idimp, int *nop, int *nx, int *ny, int *nz,
int *ipbc) {
cdistr3(part,*vtx,*vty,*vtz,*vdx,*vdy,*vdz,*npx,*npy,*npz,*idimp,
*nop,*nx,*ny,*nz,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void cdblkp3l_(float *part, int *kpic, int *nppmx, int *idimp, int *nop,
int *mx, int *my, int *mz, int *mx1, int *my1,
int *mxyz1, int *irc) {
cdblkp3l(part,kpic,nppmx,*idimp,*nop,*mx,*my,*mz,*mx1,*my1,*mxyz1,
irc);
return;
}
/*--------------------------------------------------------------------*/
void cppmovin3l_(float *part, float *ppart, int *kpic, int *nppmx,
int *idimp, int *nop, int *mx, int *my, int *mz,
int *mx1, int *my1, int *mxyz1, int *irc) {
cppmovin3l(part,ppart,kpic,*nppmx,*idimp,*nop,*mx,*my,*mz,*mx1,*my1,
*mxyz1,irc);
return;
}
/*--------------------------------------------------------------------*/
void cppcheck3l_(float *ppart, int *kpic, int *idimp, int *nppmx,
int *nx, int *ny, int *nz, int *mx, int *my, int *mz,
int *mx1, int *my1, int *mz1, int *irc) {
cppcheck3l(ppart,kpic,*idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*mx1,
*my1,*mz1,irc);
return;
}
/*--------------------------------------------------------------------*/
void cgppush3l_(float *ppart, float *fxyz, int *kpic, float *qbm,
float *dt, float *ek, int *idimp, int *nppmx, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz, int *nxv,
int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1,
int *ipbc) {
cgppush3l(ppart,fxyz,kpic,*qbm,*dt,ek,*idimp,*nppmx,*nx,*ny,*nz,*mx,
*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void cgppushf3l_(float *ppart, float *fxyz, int *kpic, int *ncl,
int *ihole, float *qbm, float *dt, float *ek,
int *idimp, int *nppmx, int *nx, int *ny, int *nz,
int *mx, int *my, int *mz, int *nxv, int *nyv,
int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax,
int *irc) {
cgppushf3l(ppart,fxyz,kpic,ncl,ihole,*qbm,*dt,ek,*idimp,*nppmx,*nx,
*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,
*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cgppost3l_(float *ppart, float *q, int *kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1) {
cgppost3l(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,*nzv,
*mx1,*my1,*mxyz1);
return;
}
/*--------------------------------------------------------------------*/
void cpporder3l_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *nx, int *ny,
int *nz, int *mx, int *my, int *mz, int *mx1,
int *my1, int *mz1, int *npbmx, int *ntmax, int *irc) {
cpporder3l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*nz,*mx,
*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cpporderf3l_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *mx1,
int *my1, int *mz1, int *npbmx, int *ntmax,
int *irc) {
cpporderf3l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,*mz1,
*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ccguard3l_(float *fxyz, int *nx, int *ny, int *nz, int *nxe,
int *nye, int *nze) {
ccguard3l(fxyz,*nx,*ny,*nz,*nxe,*nye,*nze);
return;
}
/*--------------------------------------------------------------------*/
void caguard3l_(float *q, int *nx, int *ny, int *nz, int *nxe, int *nye,
int *nze) {
caguard3l(q,*nx,*ny,*nz,*nxe,*nye,*nze);
return;
}
/*--------------------------------------------------------------------*/
void cmpois33_(float complex *q, float complex *fxyz, int *isign,
float complex *ffc, float *ax, float *ay, float *az,
float *affp, float *we, int *nx, int *ny, int *nz,
int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd,
int *nzhd) {
cmpois33(q,fxyz,*isign,ffc,*ax,*ay,*az,*affp,we,*nx,*ny,*nz,*nxvh,
*nyv,*nzv,*nxhd,*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void cwfft3rinit_(int *mixup, float complex *sct, int *indx, int *indy,
int *indz, int *nxhyzd, int *nxyzhd) {
cwfft3rinit(mixup,sct,*indx,*indy,*indz,*nxhyzd,*nxyzhd);
return;
}
/*--------------------------------------------------------------------*/
void cwfft3rmx_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *indz,
int *nxhd, int *nyd, int *nzd, int *nxhyzd,
int *nxyzhd) {
cwfft3rmx(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
*nxhyzd,*nxyzhd);
return;
}
/*--------------------------------------------------------------------*/
void cwfft3rm3_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *indz,
int *nxhd, int *nyd, int *nzd, int *nxhyzd,
int *nxyzhd) {
cwfft3rm3(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
*nxhyzd,*nxyzhd);
return;
}
|
elastic-so12.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
double section1;
double section2;
double section3;
} ;
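/* The dataobj/profiler structs, the bf0/bf1 blocked kernels, and the
   per-section timers below follow the pattern emitted by stencil code
   generators such as Devito (the "so12" in the file name suggests a
   space-order-12 discretization).  dataobj bundles a raw data pointer with
   its size and padding metadata; profiler accumulates wall-clock seconds
   per instrumented section. */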
void bf0(struct dataobj *restrict damp_vec, struct dataobj *restrict irho_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int t0, const int t1, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);
void bf1(struct dataobj *restrict damp_vec, struct dataobj *restrict lam_vec, struct dataobj *restrict mu_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int t0, const int t1, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);
int ForwardElastic(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict irho_vec, struct dataobj *restrict lam_vec, struct dataobj *restrict mu_vec, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec1_vec, struct dataobj *restrict rec1_coords_vec, struct dataobj *restrict rec2_vec, struct dataobj *restrict rec2_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec1_M, const int p_rec1_m, const int p_rec2_M, const int p_rec2_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers, const int x0_blk0_size, const int x1_blk0_size, const int y0_blk0_size, const int y1_blk0_size, const int nthreads, const int nthreads_nonaffine)
{
float (*restrict rec1)[rec1_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec1_vec->size[1]]) rec1_vec->data;
float (*restrict rec1_coords)[rec1_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec1_coords_vec->size[1]]) rec1_coords_vec->data;
float (*restrict rec2)[rec2_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec2_vec->size[1]]) rec2_vec->data;
float (*restrict rec2_coords)[rec2_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec2_coords_vec->size[1]]) rec2_coords_vec->data;
float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
float (*restrict tau_xx)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]]) tau_xx_vec->data;
float (*restrict tau_yy)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]]) tau_yy_vec->data;
float (*restrict tau_zz)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]]) tau_zz_vec->data;
float (*restrict v_x)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]]) v_x_vec->data;
float (*restrict v_y)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]]) v_y_vec->data;
float (*restrict v_z)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]]) v_z_vec->data;
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
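/* Treating subnormals as zero avoids the large per-operation penalty they
   would otherwise incur as the wavefield decays in the damped boundary
   layers; the precision lost below FLT_MIN is irrelevant at these
   amplitudes. */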
for (int time = time_m, t0 = (time)%(2), t1 = (time + 1)%(2); time <= time_M; time += 1, t0 = (time)%(2), t1 = (time + 1)%(2))
{
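/* t0/t1 ping-pong between the two time levels kept in the size-2 leading
   dimension of the field arrays, so each step reads one level and writes
   the other without copying. */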
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
bf0(damp_vec,irho_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,x0_blk0_size,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y0_blk0_size,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads);
bf0(damp_vec,irho_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,x0_blk0_size,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,(y_M - y_m + 1)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
bf0(damp_vec,irho_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,(x_M - x_m + 1)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,y0_blk0_size,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads);
bf0(damp_vec,irho_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,(x_M - x_m + 1)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,(y_M - y_m + 1)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
bf1(damp_vec,lam_vec,mu_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,x1_blk0_size,x_M - (x_M - x_m + 1)%(x1_blk0_size),x_m,y1_blk0_size,y_M - (y_M - y_m + 1)%(y1_blk0_size),y_m,z_M,z_m,nthreads);
bf1(damp_vec,lam_vec,mu_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,x1_blk0_size,x_M - (x_M - x_m + 1)%(x1_blk0_size),x_m,(y_M - y_m + 1)%(y1_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y1_blk0_size) + 1,z_M,z_m,nthreads);
bf1(damp_vec,lam_vec,mu_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,(x_M - x_m + 1)%(x1_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x1_blk0_size) + 1,y1_blk0_size,y_M - (y_M - y_m + 1)%(y1_blk0_size),y_m,z_M,z_m,nthreads);
bf1(damp_vec,lam_vec,mu_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,(x_M - x_m + 1)%(x1_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x1_blk0_size) + 1,(y_M - y_m + 1)%(y1_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y1_blk0_size) + 1,z_M,z_m,nthreads);
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
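/* Source injection: each source sample is spread onto the eight grid nodes
   surrounding its physical position with trilinear weights.  px, py, pz
   are the offsets inside the enclosing cell (the 1.0e-1 factors are
   consistent with a grid spacing of 10), the bounds checks skip corners
   that fall outside the domain, and the +12 index shifts step over the
   stencil halo. */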
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_src_M - p_src_m + 1)/nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic,chunk_size)
for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
{
int ii_src_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0]));
int ii_src_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1]));
int ii_src_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2]));
int ii_src_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])) + 1;
int ii_src_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])) + 1;
int ii_src_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])) + 1;
float px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*src_coords[p_src][0])) + src_coords[p_src][0]);
float py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*src_coords[p_src][1])) + src_coords[p_src][1]);
float pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*src_coords[p_src][2])) + src_coords[p_src][2]);
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r0 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
#pragma omp atomic update
tau_xx[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r0;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r1 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
#pragma omp atomic update
tau_xx[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r1;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r2 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
#pragma omp atomic update
tau_xx[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r2;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r3 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
#pragma omp atomic update
tau_xx[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r3;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r4 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
#pragma omp atomic update
tau_xx[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r4;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r5 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
#pragma omp atomic update
tau_xx[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r5;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r6 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
#pragma omp atomic update
tau_xx[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r6;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r7 = 1.0e-3F*px*py*pz*dt*src[time][p_src];
#pragma omp atomic update
tau_xx[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r7;
}
ii_src_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0]));
ii_src_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1]));
ii_src_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2]));
ii_src_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])) + 1;
ii_src_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])) + 1;
ii_src_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])) + 1;
px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*src_coords[p_src][0])) + src_coords[p_src][0]);
py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*src_coords[p_src][1])) + src_coords[p_src][1]);
pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*src_coords[p_src][2])) + src_coords[p_src][2]);
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r8 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
#pragma omp atomic update
tau_zz[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r8;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r9 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
#pragma omp atomic update
tau_zz[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r9;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r10 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
#pragma omp atomic update
tau_zz[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r10;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r11 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
#pragma omp atomic update
tau_zz[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r11;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r12 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
#pragma omp atomic update
tau_zz[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r12;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r13 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
#pragma omp atomic update
tau_zz[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r13;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r14 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
#pragma omp atomic update
tau_zz[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r14;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r15 = 1.0e-3F*px*py*pz*dt*src[time][p_src];
#pragma omp atomic update
tau_zz[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r15;
}
ii_src_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0]));
ii_src_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1]));
ii_src_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2]));
ii_src_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])) + 1;
ii_src_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])) + 1;
ii_src_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])) + 1;
px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*src_coords[p_src][0])) + src_coords[p_src][0]);
py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*src_coords[p_src][1])) + src_coords[p_src][1]);
pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*src_coords[p_src][2])) + src_coords[p_src][2]);
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r16 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
#pragma omp atomic update
tau_yy[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r16;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r17 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
#pragma omp atomic update
tau_yy[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r17;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r18 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
#pragma omp atomic update
tau_yy[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r18;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r19 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
#pragma omp atomic update
tau_yy[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r19;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r20 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
#pragma omp atomic update
tau_yy[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r20;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r21 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
#pragma omp atomic update
tau_yy[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r21;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r22 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
#pragma omp atomic update
tau_yy[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r22;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r23 = 1.0e-3F*px*py*pz*dt*src[time][p_src];
#pragma omp atomic update
tau_yy[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r23;
}
}
}
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
struct timeval start_section2, end_section2;
gettimeofday(&start_section2, NULL);
/* Begin section2 */
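/* Receiver sampling, the adjoint of the injection above: each rec1 trace
   reads tau_zz at the receiver position by trilinear interpolation from
   the eight surrounding grid nodes. */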
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_rec1_M - p_rec1_m + 1)/nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic,chunk_size)
for (int p_rec1 = p_rec1_m; p_rec1 <= p_rec1_M; p_rec1 += 1)
{
int ii_rec1_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec1_coords[p_rec1][0]));
int ii_rec1_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec1_coords[p_rec1][1]));
int ii_rec1_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec1_coords[p_rec1][2]));
int ii_rec1_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec1_coords[p_rec1][2])) + 1;
int ii_rec1_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec1_coords[p_rec1][1])) + 1;
int ii_rec1_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec1_coords[p_rec1][0])) + 1;
float px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*rec1_coords[p_rec1][0])) + rec1_coords[p_rec1][0]);
float py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*rec1_coords[p_rec1][1])) + rec1_coords[p_rec1][1]);
float pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*rec1_coords[p_rec1][2])) + rec1_coords[p_rec1][2]);
float sum = 0.0F;
if (ii_rec1_0 >= x_m - 1 && ii_rec1_1 >= y_m - 1 && ii_rec1_2 >= z_m - 1 && ii_rec1_0 <= x_M + 1 && ii_rec1_1 <= y_M + 1 && ii_rec1_2 <= z_M + 1)
{
sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*tau_zz[t0][ii_rec1_0 + 12][ii_rec1_1 + 12][ii_rec1_2 + 12];
}
if (ii_rec1_0 >= x_m - 1 && ii_rec1_1 >= y_m - 1 && ii_rec1_3 >= z_m - 1 && ii_rec1_0 <= x_M + 1 && ii_rec1_1 <= y_M + 1 && ii_rec1_3 <= z_M + 1)
{
sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*tau_zz[t0][ii_rec1_0 + 12][ii_rec1_1 + 12][ii_rec1_3 + 12];
}
if (ii_rec1_0 >= x_m - 1 && ii_rec1_2 >= z_m - 1 && ii_rec1_4 >= y_m - 1 && ii_rec1_0 <= x_M + 1 && ii_rec1_2 <= z_M + 1 && ii_rec1_4 <= y_M + 1)
{
sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*tau_zz[t0][ii_rec1_0 + 12][ii_rec1_4 + 12][ii_rec1_2 + 12];
}
if (ii_rec1_0 >= x_m - 1 && ii_rec1_3 >= z_m - 1 && ii_rec1_4 >= y_m - 1 && ii_rec1_0 <= x_M + 1 && ii_rec1_3 <= z_M + 1 && ii_rec1_4 <= y_M + 1)
{
sum += (-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*tau_zz[t0][ii_rec1_0 + 12][ii_rec1_4 + 12][ii_rec1_3 + 12];
}
if (ii_rec1_1 >= y_m - 1 && ii_rec1_2 >= z_m - 1 && ii_rec1_5 >= x_m - 1 && ii_rec1_1 <= y_M + 1 && ii_rec1_2 <= z_M + 1 && ii_rec1_5 <= x_M + 1)
{
sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*tau_zz[t0][ii_rec1_5 + 12][ii_rec1_1 + 12][ii_rec1_2 + 12];
}
if (ii_rec1_1 >= y_m - 1 && ii_rec1_3 >= z_m - 1 && ii_rec1_5 >= x_m - 1 && ii_rec1_1 <= y_M + 1 && ii_rec1_3 <= z_M + 1 && ii_rec1_5 <= x_M + 1)
{
sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*tau_zz[t0][ii_rec1_5 + 12][ii_rec1_1 + 12][ii_rec1_3 + 12];
}
if (ii_rec1_2 >= z_m - 1 && ii_rec1_4 >= y_m - 1 && ii_rec1_5 >= x_m - 1 && ii_rec1_2 <= z_M + 1 && ii_rec1_4 <= y_M + 1 && ii_rec1_5 <= x_M + 1)
{
sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*tau_zz[t0][ii_rec1_5 + 12][ii_rec1_4 + 12][ii_rec1_2 + 12];
}
if (ii_rec1_3 >= z_m - 1 && ii_rec1_4 >= y_m - 1 && ii_rec1_5 >= x_m - 1 && ii_rec1_3 <= z_M + 1 && ii_rec1_4 <= y_M + 1 && ii_rec1_5 <= x_M + 1)
{
sum += 1.0e-3F*px*py*pz*tau_zz[t0][ii_rec1_5 + 12][ii_rec1_4 + 12][ii_rec1_3 + 12];
}
rec1[time][p_rec1] = sum;
}
}
/* End section2 */
gettimeofday(&end_section2, NULL);
timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
struct timeval start_section3, end_section3;
gettimeofday(&start_section3, NULL);
/* Begin section3 */
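/* rec2 records the divergence of the particle velocity,
   dvx/dx + dvy/dy + dvz/dz, with each derivative taken by a twelfth-order
   centered finite-difference stencil (the +-1.80375183e-5 ... +-8.5714287e-2
   coefficients, already divided by the grid spacing) and the result
   trilinearly interpolated to the receiver position as in section2. */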
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_rec2_M - p_rec2_m + 1)/nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic,chunk_size)
for (int p_rec2 = p_rec2_m; p_rec2 <= p_rec2_M; p_rec2 += 1)
{
int ii_rec2_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec2_coords[p_rec2][0]));
int ii_rec2_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec2_coords[p_rec2][1]));
int ii_rec2_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec2_coords[p_rec2][2]));
int ii_rec2_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec2_coords[p_rec2][2])) + 1;
int ii_rec2_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec2_coords[p_rec2][1])) + 1;
int ii_rec2_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec2_coords[p_rec2][0])) + 1;
float px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*rec2_coords[p_rec2][0])) + rec2_coords[p_rec2][0]);
float py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*rec2_coords[p_rec2][1])) + rec2_coords[p_rec2][1]);
float pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*rec2_coords[p_rec2][2])) + rec2_coords[p_rec2][2]);
float sum = 0.0F;
if (ii_rec2_0 >= x_m - 1 && ii_rec2_1 >= y_m - 1 && ii_rec2_2 >= z_m - 1 && ii_rec2_0 <= x_M + 1 && ii_rec2_1 <= y_M + 1 && ii_rec2_2 <= z_M + 1)
{
sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*(1.80375183e-5F*v_x[t0][ii_rec2_0 + 6][ii_rec2_1 + 12][ii_rec2_2 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_0 + 7][ii_rec2_1 + 12][ii_rec2_2 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_0 + 8][ii_rec2_1 + 12][ii_rec2_2 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_0 + 9][ii_rec2_1 + 12][ii_rec2_2 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_0 + 10][ii_rec2_1 + 12][ii_rec2_2 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_0 + 11][ii_rec2_1 + 12][ii_rec2_2 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_0 + 13][ii_rec2_1 + 12][ii_rec2_2 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_0 + 14][ii_rec2_1 + 12][ii_rec2_2 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_0 + 15][ii_rec2_1 + 12][ii_rec2_2 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_0 + 16][ii_rec2_1 + 12][ii_rec2_2 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_0 + 17][ii_rec2_1 + 12][ii_rec2_2 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_0 + 18][ii_rec2_1 + 12][ii_rec2_2 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 6][ii_rec2_2 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 7][ii_rec2_2 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 8][ii_rec2_2 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 9][ii_rec2_2 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 10][ii_rec2_2 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 11][ii_rec2_2 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 13][ii_rec2_2 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 14][ii_rec2_2 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 15][ii_rec2_2 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 16][ii_rec2_2 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 17][ii_rec2_2 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 18][ii_rec2_2 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 18]);
}
if (ii_rec2_0 >= x_m - 1 && ii_rec2_1 >= y_m - 1 && ii_rec2_3 >= z_m - 1 && ii_rec2_0 <= x_M + 1 && ii_rec2_1 <= y_M + 1 && ii_rec2_3 <= z_M + 1)
{
sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*(1.80375183e-5F*v_x[t0][ii_rec2_0 + 6][ii_rec2_1 + 12][ii_rec2_3 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_0 + 7][ii_rec2_1 + 12][ii_rec2_3 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_0 + 8][ii_rec2_1 + 12][ii_rec2_3 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_0 + 9][ii_rec2_1 + 12][ii_rec2_3 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_0 + 10][ii_rec2_1 + 12][ii_rec2_3 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_0 + 11][ii_rec2_1 + 12][ii_rec2_3 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_0 + 13][ii_rec2_1 + 12][ii_rec2_3 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_0 + 14][ii_rec2_1 + 12][ii_rec2_3 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_0 + 15][ii_rec2_1 + 12][ii_rec2_3 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_0 + 16][ii_rec2_1 + 12][ii_rec2_3 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_0 + 17][ii_rec2_1 + 12][ii_rec2_3 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_0 + 18][ii_rec2_1 + 12][ii_rec2_3 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 6][ii_rec2_3 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 7][ii_rec2_3 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 8][ii_rec2_3 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 9][ii_rec2_3 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 10][ii_rec2_3 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 11][ii_rec2_3 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 13][ii_rec2_3 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 14][ii_rec2_3 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 15][ii_rec2_3 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 16][ii_rec2_3 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 17][ii_rec2_3 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 18][ii_rec2_3 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 18]);
}
if (ii_rec2_0 >= x_m - 1 && ii_rec2_2 >= z_m - 1 && ii_rec2_4 >= y_m - 1 && ii_rec2_0 <= x_M + 1 && ii_rec2_2 <= z_M + 1 && ii_rec2_4 <= y_M + 1)
{
sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*(1.80375183e-5F*v_x[t0][ii_rec2_0 + 6][ii_rec2_4 + 12][ii_rec2_2 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_0 + 7][ii_rec2_4 + 12][ii_rec2_2 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_0 + 8][ii_rec2_4 + 12][ii_rec2_2 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_0 + 9][ii_rec2_4 + 12][ii_rec2_2 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_0 + 10][ii_rec2_4 + 12][ii_rec2_2 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_0 + 11][ii_rec2_4 + 12][ii_rec2_2 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_0 + 13][ii_rec2_4 + 12][ii_rec2_2 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_0 + 14][ii_rec2_4 + 12][ii_rec2_2 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_0 + 15][ii_rec2_4 + 12][ii_rec2_2 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_0 + 16][ii_rec2_4 + 12][ii_rec2_2 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_0 + 17][ii_rec2_4 + 12][ii_rec2_2 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_0 + 18][ii_rec2_4 + 12][ii_rec2_2 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 6][ii_rec2_2 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 7][ii_rec2_2 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 8][ii_rec2_2 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 9][ii_rec2_2 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 10][ii_rec2_2 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 11][ii_rec2_2 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 13][ii_rec2_2 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 14][ii_rec2_2 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 15][ii_rec2_2 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 16][ii_rec2_2 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 17][ii_rec2_2 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 18][ii_rec2_2 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 18]);
}
if (ii_rec2_0 >= x_m - 1 && ii_rec2_3 >= z_m - 1 && ii_rec2_4 >= y_m - 1 && ii_rec2_0 <= x_M + 1 && ii_rec2_3 <= z_M + 1 && ii_rec2_4 <= y_M + 1)
{
sum += (-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*(1.80375183e-5F*v_x[t0][ii_rec2_0 + 6][ii_rec2_4 + 12][ii_rec2_3 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_0 + 7][ii_rec2_4 + 12][ii_rec2_3 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_0 + 8][ii_rec2_4 + 12][ii_rec2_3 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_0 + 9][ii_rec2_4 + 12][ii_rec2_3 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_0 + 10][ii_rec2_4 + 12][ii_rec2_3 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_0 + 11][ii_rec2_4 + 12][ii_rec2_3 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_0 + 13][ii_rec2_4 + 12][ii_rec2_3 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_0 + 14][ii_rec2_4 + 12][ii_rec2_3 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_0 + 15][ii_rec2_4 + 12][ii_rec2_3 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_0 + 16][ii_rec2_4 + 12][ii_rec2_3 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_0 + 17][ii_rec2_4 + 12][ii_rec2_3 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_0 + 18][ii_rec2_4 + 12][ii_rec2_3 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 6][ii_rec2_3 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 7][ii_rec2_3 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 8][ii_rec2_3 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 9][ii_rec2_3 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 10][ii_rec2_3 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 11][ii_rec2_3 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 13][ii_rec2_3 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 14][ii_rec2_3 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 15][ii_rec2_3 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 16][ii_rec2_3 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 17][ii_rec2_3 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 18][ii_rec2_3 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 18]);
}
if (ii_rec2_1 >= y_m - 1 && ii_rec2_2 >= z_m - 1 && ii_rec2_5 >= x_m - 1 && ii_rec2_1 <= y_M + 1 && ii_rec2_2 <= z_M + 1 && ii_rec2_5 <= x_M + 1)
{
sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*(1.80375183e-5F*v_x[t0][ii_rec2_5 + 6][ii_rec2_1 + 12][ii_rec2_2 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_5 + 7][ii_rec2_1 + 12][ii_rec2_2 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_5 + 8][ii_rec2_1 + 12][ii_rec2_2 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_5 + 9][ii_rec2_1 + 12][ii_rec2_2 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_5 + 10][ii_rec2_1 + 12][ii_rec2_2 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_5 + 11][ii_rec2_1 + 12][ii_rec2_2 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_5 + 13][ii_rec2_1 + 12][ii_rec2_2 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_5 + 14][ii_rec2_1 + 12][ii_rec2_2 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_5 + 15][ii_rec2_1 + 12][ii_rec2_2 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_5 + 16][ii_rec2_1 + 12][ii_rec2_2 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_5 + 17][ii_rec2_1 + 12][ii_rec2_2 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_5 + 18][ii_rec2_1 + 12][ii_rec2_2 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 6][ii_rec2_2 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 7][ii_rec2_2 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 8][ii_rec2_2 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 9][ii_rec2_2 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 10][ii_rec2_2 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 11][ii_rec2_2 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 13][ii_rec2_2 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 14][ii_rec2_2 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 15][ii_rec2_2 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 16][ii_rec2_2 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 17][ii_rec2_2 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 18][ii_rec2_2 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 18]);
}
if (ii_rec2_1 >= y_m - 1 && ii_rec2_3 >= z_m - 1 && ii_rec2_5 >= x_m - 1 && ii_rec2_1 <= y_M + 1 && ii_rec2_3 <= z_M + 1 && ii_rec2_5 <= x_M + 1)
{
sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*(1.80375183e-5F*v_x[t0][ii_rec2_5 + 6][ii_rec2_1 + 12][ii_rec2_3 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_5 + 7][ii_rec2_1 + 12][ii_rec2_3 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_5 + 8][ii_rec2_1 + 12][ii_rec2_3 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_5 + 9][ii_rec2_1 + 12][ii_rec2_3 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_5 + 10][ii_rec2_1 + 12][ii_rec2_3 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_5 + 11][ii_rec2_1 + 12][ii_rec2_3 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_5 + 13][ii_rec2_1 + 12][ii_rec2_3 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_5 + 14][ii_rec2_1 + 12][ii_rec2_3 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_5 + 15][ii_rec2_1 + 12][ii_rec2_3 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_5 + 16][ii_rec2_1 + 12][ii_rec2_3 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_5 + 17][ii_rec2_1 + 12][ii_rec2_3 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_5 + 18][ii_rec2_1 + 12][ii_rec2_3 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 6][ii_rec2_3 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 7][ii_rec2_3 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 8][ii_rec2_3 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 9][ii_rec2_3 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 10][ii_rec2_3 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 11][ii_rec2_3 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 13][ii_rec2_3 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 14][ii_rec2_3 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 15][ii_rec2_3 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 16][ii_rec2_3 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 17][ii_rec2_3 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 18][ii_rec2_3 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 18]);
}
if (ii_rec2_2 >= z_m - 1 && ii_rec2_4 >= y_m - 1 && ii_rec2_5 >= x_m - 1 && ii_rec2_2 <= z_M + 1 && ii_rec2_4 <= y_M + 1 && ii_rec2_5 <= x_M + 1)
{
sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*(1.80375183e-5F*v_x[t0][ii_rec2_5 + 6][ii_rec2_4 + 12][ii_rec2_2 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_5 + 7][ii_rec2_4 + 12][ii_rec2_2 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_5 + 8][ii_rec2_4 + 12][ii_rec2_2 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_5 + 9][ii_rec2_4 + 12][ii_rec2_2 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_5 + 10][ii_rec2_4 + 12][ii_rec2_2 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_5 + 11][ii_rec2_4 + 12][ii_rec2_2 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_5 + 13][ii_rec2_4 + 12][ii_rec2_2 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_5 + 14][ii_rec2_4 + 12][ii_rec2_2 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_5 + 15][ii_rec2_4 + 12][ii_rec2_2 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_5 + 16][ii_rec2_4 + 12][ii_rec2_2 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_5 + 17][ii_rec2_4 + 12][ii_rec2_2 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_5 + 18][ii_rec2_4 + 12][ii_rec2_2 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 6][ii_rec2_2 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 7][ii_rec2_2 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 8][ii_rec2_2 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 9][ii_rec2_2 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 10][ii_rec2_2 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 11][ii_rec2_2 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 13][ii_rec2_2 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 14][ii_rec2_2 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 15][ii_rec2_2 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 16][ii_rec2_2 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 17][ii_rec2_2 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 18][ii_rec2_2 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 18]);
}
if (ii_rec2_3 >= z_m - 1 && ii_rec2_4 >= y_m - 1 && ii_rec2_5 >= x_m - 1 && ii_rec2_3 <= z_M + 1 && ii_rec2_4 <= y_M + 1 && ii_rec2_5 <= x_M + 1)
{
sum += 1.0e-3F*px*py*pz*(1.80375183e-5F*v_x[t0][ii_rec2_5 + 6][ii_rec2_4 + 12][ii_rec2_3 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_5 + 7][ii_rec2_4 + 12][ii_rec2_3 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_5 + 8][ii_rec2_4 + 12][ii_rec2_3 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_5 + 9][ii_rec2_4 + 12][ii_rec2_3 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_5 + 10][ii_rec2_4 + 12][ii_rec2_3 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_5 + 11][ii_rec2_4 + 12][ii_rec2_3 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_5 + 13][ii_rec2_4 + 12][ii_rec2_3 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_5 + 14][ii_rec2_4 + 12][ii_rec2_3 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_5 + 15][ii_rec2_4 + 12][ii_rec2_3 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_5 + 16][ii_rec2_4 + 12][ii_rec2_3 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_5 + 17][ii_rec2_4 + 12][ii_rec2_3 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_5 + 18][ii_rec2_4 + 12][ii_rec2_3 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 6][ii_rec2_3 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 7][ii_rec2_3 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 8][ii_rec2_3 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 9][ii_rec2_3 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 10][ii_rec2_3 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 11][ii_rec2_3 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 13][ii_rec2_3 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 14][ii_rec2_3 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 15][ii_rec2_3 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 16][ii_rec2_3 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 17][ii_rec2_3 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 18][ii_rec2_3 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 18]);
}
rec2[time][p_rec2] = sum;
}
}
/* End section3 */
gettimeofday(&end_section3, NULL);
timers->section3 += (double)(end_section3.tv_sec-start_section3.tv_sec)+(double)(end_section3.tv_usec-start_section3.tv_usec)/1000000;
}
return 0;
}
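/* Editorial note: the long polynomial factors multiplying each receiver
 * stencil sum above (for example
 *   1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz,
 * which factors as (pz/10)*(1 - px/10)*(1 - py/10)) are the eight
 * trilinear interpolation weights of the receiver position inside its
 * grid cell, expanded out by the code generator for what appears to be a
 * grid spacing of h = 10. A minimal sketch of the same weights in
 * factored form; the function name and the h parameter are illustrative,
 * not part of the generated kernel. */
static void trilinear_weights(float px, float py, float pz, float h,
                              float w[8])
{
  /* fractional offsets of the point within its cell, in [0,1] */
  float fx = px/h, fy = py/h, fz = pz/h;
  /* one weight per cell corner: corner (i,j,k) takes fx or (1-fx), etc. */
  for (int k = 0; k < 2; k++)
    for (int j = 0; j < 2; j++)
      for (int i = 0; i < 2; i++)
        w[4*k + 2*j + i] = (i ? fx : 1.0f - fx)
                         * (j ? fy : 1.0f - fy)
                         * (k ? fz : 1.0f - fz);
}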
void bf0(struct dataobj *restrict damp_vec, struct dataobj *restrict irho_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int t0, const int t1, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
float (*restrict irho)[irho_vec->size[1]][irho_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[irho_vec->size[1]][irho_vec->size[2]]) irho_vec->data;
float (*restrict tau_xx)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]]) tau_xx_vec->data;
float (*restrict tau_xy)[tau_xy_vec->size[1]][tau_xy_vec->size[2]][tau_xy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xy_vec->size[1]][tau_xy_vec->size[2]][tau_xy_vec->size[3]]) tau_xy_vec->data;
float (*restrict tau_xz)[tau_xz_vec->size[1]][tau_xz_vec->size[2]][tau_xz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xz_vec->size[1]][tau_xz_vec->size[2]][tau_xz_vec->size[3]]) tau_xz_vec->data;
float (*restrict tau_yy)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]]) tau_yy_vec->data;
float (*restrict tau_yz)[tau_yz_vec->size[1]][tau_yz_vec->size[2]][tau_yz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yz_vec->size[1]][tau_yz_vec->size[2]][tau_yz_vec->size[3]]) tau_yz_vec->data;
float (*restrict tau_zz)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]]) tau_zz_vec->data;
float (*restrict v_x)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]]) v_x_vec->data;
float (*restrict v_y)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]]) v_y_vec->data;
float (*restrict v_z)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]]) v_z_vec->data;
if (x0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic,1)
for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= x0_blk0 + x0_blk0_size - 1; x += 1)
{
for (int y = y0_blk0; y <= y0_blk0 + y0_blk0_size - 1; y += 1)
{
#pragma omp simd aligned(damp,irho,tau_xx,tau_xy,tau_xz,tau_yy,tau_yz,tau_zz,v_x,v_y,v_z:32)
for (int z = z_m; z <= z_M; z += 1)
{
v_x[t1][x + 12][y + 12][z + 12] = 7.00999975204468e-1F*(irho[x + 12][y + 12][z + 12] + irho[x + 13][y + 12][z + 12])*(2.18478119e-6F*(tau_xx[t0][x + 7][y + 12][z + 12] - tau_xx[t0][x + 18][y + 12][z + 12] + tau_xy[t0][x + 12][y + 6][z + 12] - tau_xy[t0][x + 12][y + 17][z + 12] + tau_xz[t0][x + 12][y + 12][z + 6] - tau_xz[t0][x + 12][y + 12][z + 17]) + 3.59005404e-5F*(-tau_xx[t0][x + 8][y + 12][z + 12] + tau_xx[t0][x + 17][y + 12][z + 12] - tau_xy[t0][x + 12][y + 7][z + 12] + tau_xy[t0][x + 12][y + 16][z + 12] - tau_xz[t0][x + 12][y + 12][z + 7] + tau_xz[t0][x + 12][y + 12][z + 16]) + 2.96728956e-4F*(tau_xx[t0][x + 9][y + 12][z + 12] - tau_xx[t0][x + 16][y + 12][z + 12] + tau_xy[t0][x + 12][y + 8][z + 12] - tau_xy[t0][x + 12][y + 15][z + 12] + tau_xz[t0][x + 12][y + 12][z + 8] - tau_xz[t0][x + 12][y + 12][z + 15]) + 1.74476626e-3F*(-tau_xx[t0][x + 10][y + 12][z + 12] + tau_xx[t0][x + 15][y + 12][z + 12] - tau_xy[t0][x + 12][y + 9][z + 12] + tau_xy[t0][x + 12][y + 14][z + 12] - tau_xz[t0][x + 12][y + 12][z + 9] + tau_xz[t0][x + 12][y + 12][z + 14]) + 9.6931459e-3F*(tau_xx[t0][x + 11][y + 12][z + 12] - tau_xx[t0][x + 14][y + 12][z + 12] + tau_xy[t0][x + 12][y + 10][z + 12] - tau_xy[t0][x + 12][y + 13][z + 12] + tau_xz[t0][x + 12][y + 12][z + 10] - tau_xz[t0][x + 12][y + 12][z + 13]) + 1.22133638e-1F*(-tau_xx[t0][x + 12][y + 12][z + 12] + tau_xx[t0][x + 13][y + 12][z + 12] - tau_xy[t0][x + 12][y + 11][z + 12] + tau_xy[t0][x + 12][y + 12][z + 12] - tau_xz[t0][x + 12][y + 12][z + 11] + tau_xz[t0][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*v_x[t0][x + 12][y + 12][z + 12];
v_y[t1][x + 12][y + 12][z + 12] = 7.00999975204468e-1F*(irho[x + 12][y + 12][z + 12] + irho[x + 12][y + 13][z + 12])*(2.18478119e-6F*(tau_xy[t0][x + 6][y + 12][z + 12] - tau_xy[t0][x + 17][y + 12][z + 12] + tau_yy[t0][x + 12][y + 7][z + 12] - tau_yy[t0][x + 12][y + 18][z + 12] + tau_yz[t0][x + 12][y + 12][z + 6] - tau_yz[t0][x + 12][y + 12][z + 17]) + 3.59005404e-5F*(-tau_xy[t0][x + 7][y + 12][z + 12] + tau_xy[t0][x + 16][y + 12][z + 12] - tau_yy[t0][x + 12][y + 8][z + 12] + tau_yy[t0][x + 12][y + 17][z + 12] - tau_yz[t0][x + 12][y + 12][z + 7] + tau_yz[t0][x + 12][y + 12][z + 16]) + 2.96728956e-4F*(tau_xy[t0][x + 8][y + 12][z + 12] - tau_xy[t0][x + 15][y + 12][z + 12] + tau_yy[t0][x + 12][y + 9][z + 12] - tau_yy[t0][x + 12][y + 16][z + 12] + tau_yz[t0][x + 12][y + 12][z + 8] - tau_yz[t0][x + 12][y + 12][z + 15]) + 1.74476626e-3F*(-tau_xy[t0][x + 9][y + 12][z + 12] + tau_xy[t0][x + 14][y + 12][z + 12] - tau_yy[t0][x + 12][y + 10][z + 12] + tau_yy[t0][x + 12][y + 15][z + 12] - tau_yz[t0][x + 12][y + 12][z + 9] + tau_yz[t0][x + 12][y + 12][z + 14]) + 9.6931459e-3F*(tau_xy[t0][x + 10][y + 12][z + 12] - tau_xy[t0][x + 13][y + 12][z + 12] + tau_yy[t0][x + 12][y + 11][z + 12] - tau_yy[t0][x + 12][y + 14][z + 12] + tau_yz[t0][x + 12][y + 12][z + 10] - tau_yz[t0][x + 12][y + 12][z + 13]) + 1.22133638e-1F*(-tau_xy[t0][x + 11][y + 12][z + 12] + tau_xy[t0][x + 12][y + 12][z + 12] - tau_yy[t0][x + 12][y + 12][z + 12] + tau_yy[t0][x + 12][y + 13][z + 12] - tau_yz[t0][x + 12][y + 12][z + 11] + tau_yz[t0][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*v_y[t0][x + 12][y + 12][z + 12];
v_z[t1][x + 12][y + 12][z + 12] = 7.00999975204468e-1F*(irho[x + 12][y + 12][z + 12] + irho[x + 12][y + 12][z + 13])*(2.18478119e-6F*(tau_xz[t0][x + 6][y + 12][z + 12] - tau_xz[t0][x + 17][y + 12][z + 12] + tau_yz[t0][x + 12][y + 6][z + 12] - tau_yz[t0][x + 12][y + 17][z + 12] + tau_zz[t0][x + 12][y + 12][z + 7] - tau_zz[t0][x + 12][y + 12][z + 18]) + 3.59005404e-5F*(-tau_xz[t0][x + 7][y + 12][z + 12] + tau_xz[t0][x + 16][y + 12][z + 12] - tau_yz[t0][x + 12][y + 7][z + 12] + tau_yz[t0][x + 12][y + 16][z + 12] - tau_zz[t0][x + 12][y + 12][z + 8] + tau_zz[t0][x + 12][y + 12][z + 17]) + 2.96728956e-4F*(tau_xz[t0][x + 8][y + 12][z + 12] - tau_xz[t0][x + 15][y + 12][z + 12] + tau_yz[t0][x + 12][y + 8][z + 12] - tau_yz[t0][x + 12][y + 15][z + 12] + tau_zz[t0][x + 12][y + 12][z + 9] - tau_zz[t0][x + 12][y + 12][z + 16]) + 1.74476626e-3F*(-tau_xz[t0][x + 9][y + 12][z + 12] + tau_xz[t0][x + 14][y + 12][z + 12] - tau_yz[t0][x + 12][y + 9][z + 12] + tau_yz[t0][x + 12][y + 14][z + 12] - tau_zz[t0][x + 12][y + 12][z + 10] + tau_zz[t0][x + 12][y + 12][z + 15]) + 9.6931459e-3F*(tau_xz[t0][x + 10][y + 12][z + 12] - tau_xz[t0][x + 13][y + 12][z + 12] + tau_yz[t0][x + 12][y + 10][z + 12] - tau_yz[t0][x + 12][y + 13][z + 12] + tau_zz[t0][x + 12][y + 12][z + 11] - tau_zz[t0][x + 12][y + 12][z + 14]) + 1.22133638e-1F*(-tau_xz[t0][x + 11][y + 12][z + 12] + tau_xz[t0][x + 12][y + 12][z + 12] - tau_yz[t0][x + 12][y + 11][z + 12] + tau_yz[t0][x + 12][y + 12][z + 12] - tau_zz[t0][x + 12][y + 12][z + 12] + tau_zz[t0][x + 12][y + 12][z + 13]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*v_z[t0][x + 12][y + 12][z + 12];
}
}
}
}
}
}
}
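/* Editorial note: every update in bf0 above follows one pattern:
 *   v_new = damp * ( v_old + dt * irho_staggered * div_tau )
 * where irho_staggered averages 1/rho between the two cells straddling
 * the staggered velocity point (e.g. irho[x+12] + irho[x+13] for v_x) and
 * div_tau is a high-order finite-difference divergence of the stresses.
 * The constant 7.00999975204468e-1F is half of the 1.402F factor seen in
 * bf1, which looks like the time step; that reading is an assumption.
 * A simplified 1-D sketch of the same pattern, with a 2nd-order stencil
 * standing in for the generated high-order one: */
static void update_v_1d(float *v, const float *tau, const float *irho,
                        const float *damp, float dt, float h, int n)
{
  for (int i = 0; i < n - 1; i++) {
    /* average 1/rho onto the staggered velocity point i+1/2 */
    float irho_stag = 0.5f*(irho[i] + irho[i + 1]);
    /* 2nd-order staggered first derivative of the stress */
    float dtau = (tau[i + 1] - tau[i])/h;
    /* damped explicit time step, mirroring the generated update */
    v[i] = damp[i]*(v[i] + dt*irho_stag*dtau);
  }
}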
void bf1(struct dataobj *restrict damp_vec, struct dataobj *restrict lam_vec, struct dataobj *restrict mu_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int t0, const int t1, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
float (*restrict lam)[lam_vec->size[1]][lam_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[lam_vec->size[1]][lam_vec->size[2]]) lam_vec->data;
float (*restrict mu)[mu_vec->size[1]][mu_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[mu_vec->size[1]][mu_vec->size[2]]) mu_vec->data;
float (*restrict tau_xx)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]]) tau_xx_vec->data;
float (*restrict tau_xy)[tau_xy_vec->size[1]][tau_xy_vec->size[2]][tau_xy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xy_vec->size[1]][tau_xy_vec->size[2]][tau_xy_vec->size[3]]) tau_xy_vec->data;
float (*restrict tau_xz)[tau_xz_vec->size[1]][tau_xz_vec->size[2]][tau_xz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xz_vec->size[1]][tau_xz_vec->size[2]][tau_xz_vec->size[3]]) tau_xz_vec->data;
float (*restrict tau_yy)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]]) tau_yy_vec->data;
float (*restrict tau_yz)[tau_yz_vec->size[1]][tau_yz_vec->size[2]][tau_yz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yz_vec->size[1]][tau_yz_vec->size[2]][tau_yz_vec->size[3]]) tau_yz_vec->data;
float (*restrict tau_zz)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]]) tau_zz_vec->data;
float (*restrict v_x)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]]) v_x_vec->data;
float (*restrict v_y)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]]) v_y_vec->data;
float (*restrict v_z)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]]) v_z_vec->data;
if (x1_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic,1)
for (int x1_blk0 = x_m; x1_blk0 <= x_M; x1_blk0 += x1_blk0_size)
{
for (int y1_blk0 = y_m; y1_blk0 <= y_M; y1_blk0 += y1_blk0_size)
{
for (int x = x1_blk0; x <= x1_blk0 + x1_blk0_size - 1; x += 1)
{
for (int y = y1_blk0; y <= y1_blk0 + y1_blk0_size - 1; y += 1)
{
#pragma omp simd aligned(damp,lam,mu,tau_xx,tau_xy,tau_xz,tau_yy,tau_yz,tau_zz,v_x,v_y,v_z:32)
for (int z = z_m; z <= z_M; z += 1)
{
float r70 = -v_z[t1][x + 12][y + 12][z + 12];
float r69 = -v_y[t1][x + 12][y + 12][z + 12];
float r68 = -v_x[t1][x + 12][y + 12][z + 12];
float r67 = -v_z[t1][x + 12][y + 12][z + 11];
float r66 = -v_y[t1][x + 12][y + 11][z + 12];
float r65 = -v_x[t1][x + 11][y + 12][z + 12];
float r64 = -v_z[t1][x + 12][y + 12][z + 9];
float r63 = -v_y[t1][x + 12][y + 9][z + 12];
float r62 = -v_x[t1][x + 9][y + 12][z + 12];
float r61 = -v_z[t1][x + 12][y + 12][z + 13];
float r60 = -v_y[t1][x + 12][y + 13][z + 12];
float r59 = -v_x[t1][x + 13][y + 12][z + 12];
float r58 = -v_z[t1][x + 12][y + 12][z + 15];
float r57 = -v_y[t1][x + 12][y + 15][z + 12];
float r56 = -v_x[t1][x + 15][y + 12][z + 12];
float r55 = -v_z[t1][x + 12][y + 12][z + 17];
float r54 = -v_y[t1][x + 12][y + 17][z + 12];
float r53 = -v_x[t1][x + 17][y + 12][z + 12];
float r52 = -v_z[t1][x + 12][y + 12][z + 7];
float r51 = -v_y[t1][x + 12][y + 7][z + 12];
float r50 = -v_x[t1][x + 7][y + 12][z + 12];
float r49 = 1.402F*(3.59005404e-5F*(r50 + r51 + r52 + v_x[t1][x + 16][y + 12][z + 12] + v_y[t1][x + 12][y + 16][z + 12] + v_z[t1][x + 12][y + 12][z + 16]) + 2.18478119e-6F*(r53 + r54 + r55 + v_x[t1][x + 6][y + 12][z + 12] + v_y[t1][x + 12][y + 6][z + 12] + v_z[t1][x + 12][y + 12][z + 6]) + 2.96728956e-4F*(r56 + r57 + r58 + v_x[t1][x + 8][y + 12][z + 12] + v_y[t1][x + 12][y + 8][z + 12] + v_z[t1][x + 12][y + 12][z + 8]) + 9.6931459e-3F*(r59 + r60 + r61 + v_x[t1][x + 10][y + 12][z + 12] + v_y[t1][x + 12][y + 10][z + 12] + v_z[t1][x + 12][y + 12][z + 10]) + 1.74476626e-3F*(r62 + r63 + r64 + v_x[t1][x + 14][y + 12][z + 12] + v_y[t1][x + 12][y + 14][z + 12] + v_z[t1][x + 12][y + 12][z + 14]) + 1.22133638e-1F*(r65 + r66 + r67 + v_x[t1][x + 12][y + 12][z + 12] + v_y[t1][x + 12][y + 12][z + 12] + v_z[t1][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1]*lam[x + 12][y + 12][z + 12];
tau_xx[t1][x + 12][y + 12][z + 12] = r49 + 2.804F*(3.59005404e-5F*(r50 + v_x[t1][x + 16][y + 12][z + 12]) + 2.18478119e-6F*(r53 + v_x[t1][x + 6][y + 12][z + 12]) + 2.96728956e-4F*(r56 + v_x[t1][x + 8][y + 12][z + 12]) + 9.6931459e-3F*(r59 + v_x[t1][x + 10][y + 12][z + 12]) + 1.74476626e-3F*(r62 + v_x[t1][x + 14][y + 12][z + 12]) + 1.22133638e-1F*(r65 + v_x[t1][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1]*mu[x + 12][y + 12][z + 12] + damp[x + 1][y + 1][z + 1]*tau_xx[t0][x + 12][y + 12][z + 12];
tau_xy[t1][x + 12][y + 12][z + 12] = 3.50499987602234e-1F*(mu[x + 12][y + 12][z + 12] + mu[x + 12][y + 13][z + 12] + mu[x + 13][y + 12][z + 12] + mu[x + 13][y + 13][z + 12])*(1.22133638e-1F*(r68 + r69 + v_x[t1][x + 12][y + 13][z + 12] + v_y[t1][x + 13][y + 12][z + 12]) + 2.18478119e-6F*(v_x[t1][x + 12][y + 7][z + 12] - v_x[t1][x + 12][y + 18][z + 12] + v_y[t1][x + 7][y + 12][z + 12] - v_y[t1][x + 18][y + 12][z + 12]) + 3.59005404e-5F*(-v_x[t1][x + 12][y + 8][z + 12] + v_x[t1][x + 12][y + 17][z + 12] - v_y[t1][x + 8][y + 12][z + 12] + v_y[t1][x + 17][y + 12][z + 12]) + 2.96728956e-4F*(v_x[t1][x + 12][y + 9][z + 12] - v_x[t1][x + 12][y + 16][z + 12] + v_y[t1][x + 9][y + 12][z + 12] - v_y[t1][x + 16][y + 12][z + 12]) + 1.74476626e-3F*(-v_x[t1][x + 12][y + 10][z + 12] + v_x[t1][x + 12][y + 15][z + 12] - v_y[t1][x + 10][y + 12][z + 12] + v_y[t1][x + 15][y + 12][z + 12]) + 9.6931459e-3F*(v_x[t1][x + 12][y + 11][z + 12] - v_x[t1][x + 12][y + 14][z + 12] + v_y[t1][x + 11][y + 12][z + 12] - v_y[t1][x + 14][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*tau_xy[t0][x + 12][y + 12][z + 12];
tau_xz[t1][x + 12][y + 12][z + 12] = 3.50499987602234e-1F*(mu[x + 12][y + 12][z + 12] + mu[x + 12][y + 12][z + 13] + mu[x + 13][y + 12][z + 12] + mu[x + 13][y + 12][z + 13])*(1.22133638e-1F*(r68 + r70 + v_x[t1][x + 12][y + 12][z + 13] + v_z[t1][x + 13][y + 12][z + 12]) + 2.18478119e-6F*(v_x[t1][x + 12][y + 12][z + 7] - v_x[t1][x + 12][y + 12][z + 18] + v_z[t1][x + 7][y + 12][z + 12] - v_z[t1][x + 18][y + 12][z + 12]) + 3.59005404e-5F*(-v_x[t1][x + 12][y + 12][z + 8] + v_x[t1][x + 12][y + 12][z + 17] - v_z[t1][x + 8][y + 12][z + 12] + v_z[t1][x + 17][y + 12][z + 12]) + 2.96728956e-4F*(v_x[t1][x + 12][y + 12][z + 9] - v_x[t1][x + 12][y + 12][z + 16] + v_z[t1][x + 9][y + 12][z + 12] - v_z[t1][x + 16][y + 12][z + 12]) + 1.74476626e-3F*(-v_x[t1][x + 12][y + 12][z + 10] + v_x[t1][x + 12][y + 12][z + 15] - v_z[t1][x + 10][y + 12][z + 12] + v_z[t1][x + 15][y + 12][z + 12]) + 9.6931459e-3F*(v_x[t1][x + 12][y + 12][z + 11] - v_x[t1][x + 12][y + 12][z + 14] + v_z[t1][x + 11][y + 12][z + 12] - v_z[t1][x + 14][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*tau_xz[t0][x + 12][y + 12][z + 12];
tau_yy[t1][x + 12][y + 12][z + 12] = r49 + 2.804F*(3.59005404e-5F*(r51 + v_y[t1][x + 12][y + 16][z + 12]) + 2.18478119e-6F*(r54 + v_y[t1][x + 12][y + 6][z + 12]) + 2.96728956e-4F*(r57 + v_y[t1][x + 12][y + 8][z + 12]) + 9.6931459e-3F*(r60 + v_y[t1][x + 12][y + 10][z + 12]) + 1.74476626e-3F*(r63 + v_y[t1][x + 12][y + 14][z + 12]) + 1.22133638e-1F*(r66 + v_y[t1][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1]*mu[x + 12][y + 12][z + 12] + damp[x + 1][y + 1][z + 1]*tau_yy[t0][x + 12][y + 12][z + 12];
tau_yz[t1][x + 12][y + 12][z + 12] = 3.50499987602234e-1F*(mu[x + 12][y + 12][z + 12] + mu[x + 12][y + 12][z + 13] + mu[x + 12][y + 13][z + 12] + mu[x + 12][y + 13][z + 13])*(1.22133638e-1F*(r69 + r70 + v_y[t1][x + 12][y + 12][z + 13] + v_z[t1][x + 12][y + 13][z + 12]) + 2.18478119e-6F*(v_y[t1][x + 12][y + 12][z + 7] - v_y[t1][x + 12][y + 12][z + 18] + v_z[t1][x + 12][y + 7][z + 12] - v_z[t1][x + 12][y + 18][z + 12]) + 3.59005404e-5F*(-v_y[t1][x + 12][y + 12][z + 8] + v_y[t1][x + 12][y + 12][z + 17] - v_z[t1][x + 12][y + 8][z + 12] + v_z[t1][x + 12][y + 17][z + 12]) + 2.96728956e-4F*(v_y[t1][x + 12][y + 12][z + 9] - v_y[t1][x + 12][y + 12][z + 16] + v_z[t1][x + 12][y + 9][z + 12] - v_z[t1][x + 12][y + 16][z + 12]) + 1.74476626e-3F*(-v_y[t1][x + 12][y + 12][z + 10] + v_y[t1][x + 12][y + 12][z + 15] - v_z[t1][x + 12][y + 10][z + 12] + v_z[t1][x + 12][y + 15][z + 12]) + 9.6931459e-3F*(v_y[t1][x + 12][y + 12][z + 11] - v_y[t1][x + 12][y + 12][z + 14] + v_z[t1][x + 12][y + 11][z + 12] - v_z[t1][x + 12][y + 14][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*tau_yz[t0][x + 12][y + 12][z + 12];
tau_zz[t1][x + 12][y + 12][z + 12] = r49 + 2.804F*(3.59005404e-5F*(r52 + v_z[t1][x + 12][y + 12][z + 16]) + 2.18478119e-6F*(r55 + v_z[t1][x + 12][y + 12][z + 6]) + 2.96728956e-4F*(r58 + v_z[t1][x + 12][y + 12][z + 8]) + 9.6931459e-3F*(r61 + v_z[t1][x + 12][y + 12][z + 10]) + 1.74476626e-3F*(r64 + v_z[t1][x + 12][y + 12][z + 14]) + 1.22133638e-1F*(r67 + v_z[t1][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1]*mu[x + 12][y + 12][z + 12] + damp[x + 1][y + 1][z + 1]*tau_zz[t0][x + 12][y + 12][z + 12];
}
}
}
}
}
}
}
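/* Editorial note: bf1 mirrors bf0 for the stress update: r49 collects the
 * lam*div(v) term shared by the three normal stresses, the 2.804F (= 2*1.402F)
 * blocks add the 2*mu*dv/dx style terms, and the shear stresses average mu
 * over the four cells surrounding their staggered location -- i.e. a damped
 * explicit update of Hooke's law for an isotropic elastic medium. This
 * physical reading is an editorial inference from the coefficients, not a
 * statement from the generated code itself. */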
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/color-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/resample.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/option.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather than direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
#if FILTER_LUT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
struct _ResampleFilter
{
CacheView
*view;
Image
*image;
ExceptionInfo
*exception;
MagickBooleanType
debug;
/* Information about image being resampled */
ssize_t
image_area;
PixelInterpolateMethod
interpolate;
VirtualPixelMethod
virtual_pixel;
FilterType
filter;
/* processing settings needed */
MagickBooleanType
limit_reached,
do_interpolate,
average_defined;
PixelInfo
average_pixel;
/* current elliptical area being resampled around the center point */
double
A, B, C,
Vlimit, Ulimit, Uwidth, slope;
#if FILTER_LUT
/* LUT of weights for filtered average in elliptical area */
double
filter_lut[WLUT_WIDTH];
#else
/* Use a Direct call to the filter functions */
ResizeFilter
*filter_def;
double
F;
#endif
/* the practical working support of the filter */
double
support;
size_t
signature;
};
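/*
  Editorial note: with FILTER_LUT enabled, the ellipse quotient Q is
  pre-scaled so that the filter support maps onto [0,WLUT_WIDTH), and a
  weight becomes one table lookup instead of a filter call per pixel.
  A minimal sketch of how such a table could be filled; "filter_func" is
  a stand-in for the real resize filter, not an API from this file:
*/
#if 0 /* illustrative sketch only */
static void fill_filter_lut(double *lut, size_t width,
  double (*filter_func)(double), double support)
{
  register size_t
    q;

  /* lut[q] holds the weight for the pre-scaled squared radius q, i.e.
     radius = support*sqrt(q/width); the sqrt() is paid once here rather
     than once per resampled pixel. */
  for (q=0; q < width; q++)
    lut[q]=filter_func(support*sqrt((double) q/width));
}
#endif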
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resampling needs to do a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on an Elliptical Weighted Average, where the pixels
% found in a large elliptical area are averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping", a master's thesis by Paul S. Heckbert, June 17,
% 1989, available for free from http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the generated ResampleFilter structure holds that information
% between individual resample operations.
%
% This function will make the appropriate AcquireCacheView() calls
% to view the image; calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
ExceptionInfo *exception)
{
register ResampleFilter
*resample_filter;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
resample_filter=(ResampleFilter *) AcquireCriticalMemory(sizeof(
*resample_filter));
(void) ResetMagickMemory(resample_filter,0,sizeof(*resample_filter));
resample_filter->exception=exception;
resample_filter->image=ReferenceImage((Image *) image);
resample_filter->view=AcquireVirtualCacheView(resample_filter->image,
exception);
resample_filter->debug=IsEventLogging();
resample_filter->image_area=(ssize_t) (image->columns*image->rows);
resample_filter->average_defined=MagickFalse;
resample_filter->signature=MagickCoreSignature;
SetResampleFilter(resample_filter,image->filter);
(void) SetResampleFilterInterpolateMethod(resample_filter,image->interpolate);
(void) SetResampleFilterVirtualPixelMethod(resample_filter,
GetImageVirtualPixelMethod(image));
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resample_filter, as
% returned by AcquireResampleFilter(), freeing any memory
% or other information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
ResampleFilter *resample_filter)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->view=DestroyCacheView(resample_filter->view);
resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
resample_filter->signature=(~MagickCoreSignature);
resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficient manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,PixelInfo *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: doubles representing the center of the area to resample;
% the distortion-transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
ResampleFilter *resample_filter,const double u0,const double v0,
PixelInfo *pixel,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t u,v, v1, v2, uw, hit;
double u1;
double U,V,Q,DQ,DDQ;
double divisor_c,divisor_m;
register double weight;
register const Quantum *pixels;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
status=MagickTrue;
/* GetPixelInfo(resample_filter->image,pixel); */
if ( resample_filter->do_interpolate ) {
status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
resample_filter->interpolate,u0,v0,pixel,resample_filter->exception);
return(status);
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif
/*
Does the resample area miss the image proper?
If so, and that area is a simple solid color, then simply return
that color!  This saves a lot of calculation when resampling
outside the bounds of the source image.
However, it probably should be expanded to the image bounds plus the
filter's scaled support size.
*/
hit = 0;
switch ( resample_filter->virtual_pixel ) {
case BackgroundVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case MaskVirtualPixelMethod:
if ( resample_filter->limit_reached
|| u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
|| v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++;
break;
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 + resample_filter->Ulimit < 0.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
)
hit++;
break;
case HorizontalTileVirtualPixelMethod:
if ( v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++; /* outside the horizontally tiled images. */
break;
case VerticalTileVirtualPixelMethod:
if ( u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
)
hit++; /* outside the vertically tiled images. */
break;
case DitherVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 + resample_filter->Ulimit < -32.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
)
hit++;
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
/* resampling of area is always needed - no VP limits */
break;
}
if ( hit ) {
/* The area being resampled is simply a solid color,
* so just return a single lookup color.
*
* Should this return the user's requested interpolated color?
*/
status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
IntegerInterpolatePixel,u0,v0,pixel,resample_filter->exception);
return(status);
}
/*
When scaling limits are reached, return an 'averaged' result.
*/
if ( resample_filter->limit_reached ) {
switch ( resample_filter->virtual_pixel ) {
/* This is always handled by the above, so no need.
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case GrayVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case MaskVirtualPixelMethod:
*/
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
case DitherVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
/* We need an average edge pixel, from the correct edge!
How should I calculate an average edge color?
Just returning an averaged neighbourhood works well in general,
but falls down for TileEdge methods.
This needs to be done properly!!!!!!
*/
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
break;
case HorizontalTileVirtualPixelMethod:
case VerticalTileVirtualPixelMethod:
/* just return the background pixel - is there a more direct way? */
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
resample_filter->exception);
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
default:
/* generate an average color of the WHOLE image */
if ( resample_filter->average_defined == MagickFalse ) {
Image
*average_image;
CacheView
*average_view;
GetPixelInfo(resample_filter->image,(PixelInfo *)
&resample_filter->average_pixel);
resample_filter->average_defined=MagickTrue;
/* Try to get an averaged pixel color of the whole image */
average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,
resample_filter->exception);
if (average_image == (Image *) NULL)
{
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
average_view=AcquireVirtualCacheView(average_image,exception);
pixels=GetCacheViewVirtualPixels(average_view,0,0,1,1,
resample_filter->exception);
if (pixels == (const Quantum *) NULL) {
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
GetPixelInfoPixel(resample_filter->image,pixels,
&(resample_filter->average_pixel));
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
{
/* CheckerTile is an alpha blend of the image's average pixel
color and the current background color */
/* image's average pixel color */
weight = QuantumScale*((double)
resample_filter->average_pixel.alpha);
resample_filter->average_pixel.red *= weight;
resample_filter->average_pixel.green *= weight;
resample_filter->average_pixel.blue *= weight;
divisor_c = weight;
/* background color */
weight = QuantumScale*((double)
resample_filter->image->background_color.alpha);
resample_filter->average_pixel.red +=
weight*resample_filter->image->background_color.red;
resample_filter->average_pixel.green +=
weight*resample_filter->image->background_color.green;
resample_filter->average_pixel.blue +=
weight*resample_filter->image->background_color.blue;
resample_filter->average_pixel.alpha +=
resample_filter->image->background_color.alpha;
divisor_c += weight;
/* alpha blend */
resample_filter->average_pixel.red /= divisor_c;
resample_filter->average_pixel.green /= divisor_c;
resample_filter->average_pixel.blue /= divisor_c;
resample_filter->average_pixel.alpha /= 2; /* 50% blend */
}
}
*pixel=resample_filter->average_pixel;
break;
}
return(status);
}
/*
Initialize weighted average data collection
*/
hit = 0;
divisor_c = 0.0;
divisor_m = 0.0;
pixel->red = pixel->green = pixel->blue = 0.0;
if (pixel->colorspace == CMYKColorspace)
pixel->black = 0.0;
if (pixel->alpha_trait != UndefinedPixelTrait)
pixel->alpha = 0.0;
/*
Determine the parallelogram bounding box fitted to the ellipse
centered at u0,v0. This area is bounded by the lines...
*/
v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */
v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);
/* scan line start and width across the parallelogram */
u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
(void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif
/*
Do weighted resampling of all pixels, within the scaled ellipse,
bounded by a parallelogram fitted to the ellipse.
*/
DDQ = 2*resample_filter->A;
for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
long uu = ceil(u1); /* actual pixel location (for debug only) */
(void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
u = (ssize_t)ceil(u1); /* first pixel in scanline */
u1 += resample_filter->slope; /* start of next scan line */
/* location of this first pixel, relative to u0,v0 */
U = (double)u-u0;
V = (double)v-v0;
/* Q = ellipse quotient (if Q < F then the pixel is inside the ellipse) */
Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;
/* get the scanline of pixels for this v */
pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
1,resample_filter->exception);
if (pixels == (const Quantum *) NULL)
return(MagickFalse);
/* count up the weighted pixel colors */
for( u=0; u<uw; u++ ) {
#if FILTER_LUT
/* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
if ( Q < (double)WLUT_WIDTH ) {
weight = resample_filter->filter_lut[(int)Q];
#else
/* Note that the ellipse has been pre-scaled so F = support^2 */
if ( Q < (double)resample_filter->F ) {
weight = GetResizeFilterWeight(resample_filter->filter_def,
sqrt(Q)); /* a SquareRoot! Arrggghhhhh... */
#endif
pixel->alpha += weight*GetPixelAlpha(resample_filter->image,pixels);
divisor_m += weight;
if (pixel->alpha_trait != UndefinedPixelTrait)
weight *= QuantumScale*((double) GetPixelAlpha(resample_filter->image,pixels));
pixel->red += weight*GetPixelRed(resample_filter->image,pixels);
pixel->green += weight*GetPixelGreen(resample_filter->image,pixels);
pixel->blue += weight*GetPixelBlue(resample_filter->image,pixels);
if (pixel->colorspace == CMYKColorspace)
pixel->black += weight*GetPixelBlack(resample_filter->image,pixels);
divisor_c += weight;
hit++;
#if DEBUG_HIT_MISS
/* mark the pixel according to hit/miss of the ellipse */
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
} else {
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
}
uu++;
#else
}
#endif
pixels+=GetPixelChannels(resample_filter->image);
Q += DQ;
DQ += DDQ;
}
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif
/*
Result sanity check -- this should NOT happen
*/
if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
/* not enough pixels, or bad weighting in resampling;
resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
pixel->alpha = pixel->red = pixel->green = pixel->blue = 0;
pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
#endif
return(status);
}
/*
Finalize results of resampling
*/
divisor_m = 1.0/divisor_m;
if (pixel->alpha_trait != UndefinedPixelTrait)
pixel->alpha = (double) ClampToQuantum(divisor_m*pixel->alpha);
divisor_c = 1.0/divisor_c;
pixel->red = (double) ClampToQuantum(divisor_c*pixel->red);
pixel->green = (double) ClampToQuantum(divisor_c*pixel->green);
pixel->blue = (double) ClampToQuantum(divisor_c*pixel->blue);
if (pixel->colorspace == CMYKColorspace)
pixel->black = (double) ClampToQuantum(divisor_c*pixel->black);
return(MagickTrue);
}
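/*
  Editorial note: the scan loop in ResamplePixelColor() evaluates the
  ellipse quotient Q(U,V) = A*U*U + B*U*V + C*V*V incrementally.
  Stepping U by one pixel gives the forward differences
    DQ  = Q(U+1,V) - Q(U,V) = A*(2*U+1) + B*V
    DDQ = DQ(U+1) - DQ(U)   = 2*A
  so each pixel costs two additions instead of a full quadratic
  evaluation.  A minimal self-check of that identity, with hypothetical
  coefficients (requires <assert.h> and <math.h>):
*/
#if 0 /* illustrative sketch only */
static void CheckEllipseForwardDifference(void)
{
  const double A = 0.3, B = 0.1, C = 0.4, V = 2.0;
  double U = -5.0;
  double Q = (A*U + B*V)*U + C*V*V;   /* direct evaluation at U */
  double DQ = A*(2.0*U + 1.0) + B*V;
  const double DDQ = 2.0*A;
  int i;

  for (i=0; i < 10; i++) {
    double direct = (A*(U + i) + B*V)*(U + i) + C*V*V;
    assert(fabs(Q - direct) < 1e-12);  /* incremental == direct */
    Q += DQ;    /* advance to U+i+1 */
    DQ += DDQ;
  }
}
#endif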
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The ClampUpAxes() function converts the input vectors into major and
% minor axis unit vectors, and their magnitudes. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
 * DETAILS: Find the singular values and (unit) left singular
 * vectors of Jinv, clamping up the singular values to 1, and
 * multiply the unit left singular vectors by the new singular
 * values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
 * Thyssen and funding from the Natural Sciences and Engineering
 * Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
 * The idea of clamping up the EWA ellipse's major and minor axes so
 * that the result contains the reconstruction kernel filter support
 * is taken from Andreas Gustafsson's Masters thesis "Interactive
 * Image Warping", Helsinki University of Technology, Faculty of
 * Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
/*
* In exact arithmetic, discriminant can't be negative. In floating
* point, it can, because of the bad conditioning of SVD
* decompositions done through the associated normal matrix.
*/
const double sqrt_discriminant =
sqrt(discriminant > 0.0 ? discriminant : 0.0);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
 * s2 is the smallest singular value of the inverse Jacobian
 * matrix. Its reciprocal is the largest singular value of the
 * Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
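/*
  A minimal usage sketch, assuming the projection formula documented in
  the Output comment above; it is not part of MagickCore and the name
  ewa_distance_squared is hypothetical. Given the clamped axes returned
  by ClampUpAxes(), it computes the squared elliptical distance of a
  pullback offset (s,t).
*/
static inline double ewa_distance_squared(const double s,const double t,
  const double major_mag,const double minor_mag,
  const double major_unit_x,const double major_unit_y,
  const double minor_unit_x,const double minor_unit_y)
{
  /* project (s,t) onto each unit axis, scaled by the axis half-lengths */
  const double p = (s*major_unit_x+t*major_unit_y)/major_mag;
  const double q = (s*minor_unit_x+t*minor_unit_y)/minor_mag;
  return(p*p+q*q);  /* <= 1.0 means inside the clamped ellipse */
}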
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleResampleFilter() does all the calculations needed to resample an image
%  at a specific scale, defined by two scaling vectors. This is not an
%  orthogonal scaling, but two distorted scaling vectors, allowing the
%  generation of an angled ellipse.
%
%  As only two derivative scaling vectors are used, the center of the ellipse
%  must be the center of the lookup. That is, any curvature that the
%  distortion may produce is discounted.
%
%  The input vectors are produced by either finding the derivatives of the
%  distortion function, or the partial derivatives from a distortion mapping.
%  They do not need to be the orthogonal dx,dy scaling vectors, but can be
%  calculated from other derivatives. For example you could use the dr,da/r
%  polar coordinate scaling vectors.
%
%  If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
%  Then the scaling vectors are determined from the derivatives...
%      du/dx, dv/dx and du/dy, dv/dy
%  If the resulting scaling vectors are orthogonally aligned then...
%      dv/dx = 0 and du/dy = 0
%  Producing an orthogonally aligned ellipse in source space for the area to
%  be resampled.
%
%  Note that the scaling vectors differ from the argument order. The argument
%  order is the general order in which the derivatives are extracted from the
%  distortion equations, not the scaling-vector order. As such the middle two
%  values may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
%    o resample_filter: the resample filter information defining the
%      image being resampled
%
%    o dux,duy,dvx,dvy:
%         The derivatives or scaling vectors defining the EWA ellipse.
%         NOTE: watch the order, which is based on the order derivatives
%         are usually determined from distortion equations (see above).
%         The middle two values may need to be swapped if you are thinking
%         in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
const double dux,const double duy,const double dvx,const double dvy)
{
double A,B,C,F;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
resample_filter->limit_reached = MagickFalse;
/* A 'point' filter forces use of interpolation instead of area sampling */
if ( resample_filter->filter == PointFilter )
return; /* EWA turned off - nothing to do */
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "# -----\n" );
(void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
dux, dvx, duy, dvy);
#endif
  /* Find the ellipse coefficients such that
        A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
        du/dx,dv/dx and du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients.
     However when magnifying images, the scaling vectors will be small,
     resulting in an ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axes to a minimum of 1.0
     to prevent the ellipse getting too small.
  */
#if EWA_CLAMP
{ double major_mag,
minor_mag,
major_x,
major_y,
minor_x,
minor_y;
ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
&major_x, &major_y, &minor_x, &minor_y);
major_x *= major_mag; major_y *= major_mag;
minor_x *= minor_mag; minor_y *= minor_mag;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
major_x, major_y, minor_x, minor_y);
#endif
A = major_y*major_y+minor_y*minor_y;
B = -2.0*(major_x*major_y+minor_x*minor_y);
C = major_x*major_x+minor_x*minor_x;
F = major_mag*minor_mag;
F *= F; /* square it */
}
#else /* raw unclamped EWA */
A = dvx*dvx+dvy*dvy;
B = -2.0*(dux*dvx+duy*dvy);
C = dux*dux+duy*duy;
F = dux*dvy-duy*dvx;
F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
  /*
    This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 of his
    thesis, which adds a unit circle to the elliptical area so as to do both
    Reconstruction and Prefiltering of the pixels in the resampling. It also
    means it is always likely to have at least 4 pixels within the area of the
    ellipse, for weighted averaging. No scaling will result with F == 4.0 and
    a circle of radius 2.0, and F smaller than this means magnification is
    being used.
    NOTE: This method produces a very blurry result at near unity scale while
    producing perfect results for strong minification and magnification.
    However filter support is fixed to 2.0 (no good for windowed sinc filters).
  */
A = dvx*dvx+dvy*dvy+1;
B = -2.0*(dux*dvx+duy*dvy);
C = dux*dux+duy*duy+1;
F = A*C - B*B/4;
#endif
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);
  /* Figure out the various information directly about the ellipse.
     This information is not currently needed, but may be
     needed later for better limit determination.
     It is also good to have as a record for future debugging.
  */
{ double alpha, beta, gamma, Major, Minor;
double Eccentricity, Ellipse_Area, Ellipse_Angle;
alpha = A+C;
beta = A-C;
gamma = sqrt(beta*beta + B*B );
if ( alpha - gamma <= MagickEpsilon )
Major=MagickMaximumValue;
else
Major=sqrt(2*F/(alpha - gamma));
Minor = sqrt(2*F/(alpha + gamma));
(void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );
/* other information about ellipse include... */
Eccentricity = Major/Minor;
Ellipse_Area = MagickPI*Major*Minor;
Ellipse_Angle = atan2(B, A-C);
(void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
(double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
}
#endif
  /* If one or both of the scaling vectors is impossibly large
     (producing a very large raw F value), we may as well not bother
     doing any form of resampling, since the resampled area is very large.
     In this case some alternative means of pixel sampling, such as
     the average of the whole image, is needed to get a reasonable
     result. Calculate only as needed.
  */
if ( (4*A*C - B*B) > MagickMaximumValue ) {
resample_filter->limit_reached = MagickTrue;
return;
}
  /* Scale ellipse to match the filter's support
     (that is, multiply F by the square of the support).
     Simpler to just multiply it by the support twice!
  */
F *= resample_filter->support;
F *= resample_filter->support;
/* Orthogonal bounds of the ellipse */
resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));
/* Horizontally aligned parallelogram fitted to Ellipse */
resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
resample_filter->Ulimit, resample_filter->Vlimit,
resample_filter->Uwidth, resample_filter->slope );
#endif
/* Check the absolute area of the parallelogram involved.
* This limit needs more work, as it is too slow for larger images
* with tiled views of the horizon.
*/
if ( (resample_filter->Uwidth * resample_filter->Vlimit)
> (4.0*resample_filter->image_area)) {
resample_filter->limit_reached = MagickTrue;
return;
}
/* Scale ellipse formula to directly index the Filter Lookup Table */
{ register double scale;
#if FILTER_LUT
/* scale so that F = WLUT_WIDTH; -- hardcoded */
scale = (double)WLUT_WIDTH/F;
#else
/* scale so that F = resample_filter->F (support^2) */
scale = resample_filter->F/F;
#endif
resample_filter->A = A*scale;
resample_filter->B = B*scale;
resample_filter->C = C*scale;
}
}
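/*
  Hedged sketch (not a MagickCore API): once ScaleResampleFilter() has set
  the coefficients, a sample offset (u,v) relative to the lookup point lies
  inside the EWA ellipse exactly when the quadratic form is at most F, as
  documented above. The helper name inside_ellipse is hypothetical.
*/
static inline int inside_ellipse(const double u,const double v,
  const double A,const double B,const double C,const double F)
{
  return((A*u*u+B*u*v+C*v*v) <= F ? 1 : 0);
}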
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilter() sets the resampling filter lookup table based on a
%  specific filter. Note that the filter is used as a radial filter, not as a
%  two pass orthogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterType filter)
%
% A description of each parameter follows:
%
%    o resample_filter: the resample filter information structure
%
% o filter: the resize filter for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
const FilterType filter)
{
ResizeFilter
*resize_filter;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
resample_filter->do_interpolate = MagickFalse;
resample_filter->filter = filter;
/* Default cylindrical filter is a Cubic Keys filter */
if ( filter == UndefinedFilter )
resample_filter->filter = RobidouxFilter;
if ( resample_filter->filter == PointFilter ) {
resample_filter->do_interpolate = MagickTrue;
return; /* EWA turned off - nothing more to do */
}
resize_filter = AcquireResizeFilter(resample_filter->image,
resample_filter->filter,MagickTrue,resample_filter->exception);
if (resize_filter == (ResizeFilter *) NULL) {
(void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
ModuleError, "UnableToSetFilteringValue",
"Fall back to Interpolated 'Point' filter");
resample_filter->filter = PointFilter;
resample_filter->do_interpolate = MagickTrue;
return; /* EWA turned off - nothing more to do */
}
  /* Get the practical working support for the filter,
   * after any API-call blur factors have been accounted for.
   */
#if EWA
resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif
#if FILTER_LUT
/* Fill the LUT with the weights from the selected filter function */
{ register int
Q;
double
r_scale;
/* Scale radius so the filter LUT covers the full support range */
r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
for(Q=0; Q<WLUT_WIDTH; Q++)
resample_filter->filter_lut[Q] = (double)
GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);
/* finished with the resize filter */
resize_filter = DestroyResizeFilter(resize_filter);
}
#else
/* save the filter and the scaled ellipse bounds needed for filter */
resample_filter->filter_def = resize_filter;
resample_filter->F = resample_filter->support*resample_filter->support;
#endif
/*
Adjust the scaling of the default unit circle
This assumes that any real scaling changes will always
take place AFTER the filter method has been initialized.
*/
ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);
#if 0
  /*
    This is old code kept as a reference only. Basically it generates
    a Gaussian bell curve, with sigma = 0.5 if the support is 2.0.
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA Gaussian lookup would use  exp(Q*ALPHA)
    where  Q = distance squared from 0.0 (center) to 1.0 (edge)
    and  ALPHA = -4.0*ln(2.0)  ==>  -2.77258872223978123767
    The table is of length 1024, and equates to a support radius of 2.0,
    thus needs to be scaled by  ALPHA*4/1024  and any blur factor squared.
    It comes from reference code provided by Fred Weinhaus.
  */
r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
for(Q=0; Q<WLUT_WIDTH; Q++)
resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
resample_filter->support = WLUT_WIDTH;
#endif
#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp single
#endif
{
if (IsStringTrue(GetImageArtifact(resample_filter->image,
"resample:verbose")) != MagickFalse)
{
register int
Q;
double
r_scale;
        /* Debug output of the filter weighting LUT.
           Gnuplot the LUT data (the x scale index has been adjusted):
             plot [0:2][-.2:1] "lut.dat" with lines
           The filter values should be normalized for comparison.
        */
printf("#\n");
printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
resample_filter->filter) );
printf("#\n");
printf("# Note: values in table are using a squared radius lookup.\n");
printf("# As such its distribution is not uniform.\n");
printf("#\n");
printf("# The X value is the support distance for the Y weight\n");
printf("# so you can use gnuplot to plot this cylindrical filter\n");
printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
printf("#\n");
/* Scale radius so the filter LUT covers the full support range */
r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
for(Q=0; Q<WLUT_WIDTH; Q++)
printf("%8.*g %.*g\n",
GetMagickPrecision(),sqrt((double)Q)*r_scale,
GetMagickPrecision(),resample_filter->filter_lut[Q] );
printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
}
/* Output the above once only for each image, and each setting
(void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
*/
}
#endif /* FILTER_LUT */
return;
}
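/*
  Hedged usage sketch implied by the WARNING in the ScaleResampleFilter()
  documentation above: the filter must be selected before the ellipse is
  scaled. The resample_filter value is assumed to come from
  AcquireResampleFilter() elsewhere in this file; the derivative values
  dux,duy,dvx,dvy are illustrative only.
*/
#if 0
  SetResampleFilter(resample_filter,RobidouxFilter);     /* choose filter first */
  ScaleResampleFilter(resample_filter,dux,duy,dvx,dvy);  /* then size ellipse */
#endif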
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const InterpolateMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
ResampleFilter *resample_filter,const PixelInterpolateMethod method)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->interpolate=method;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->virtual_pixel=method;
if (method != UndefinedVirtualPixelMethod)
(void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
return(MagickTrue);
}
|
8214.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
#pragma omp parallel private (i, j, k) num_threads(#P11)
{
/* E := A*B */
#pragma omp target teams distribute thread_limit(64)
for (i = 0; i < _PB_NI; i++)
{
#pragma omp target teams distribute thread_limit(64)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
#pragma omp target teams distribute thread_limit(64)
for (i = 0; i < _PB_NJ; i++)
{
#pragma omp target teams distribute thread_limit(64)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F */
#pragma omp target teams distribute thread_limit(64)
for (i = 0; i < _PB_NI; i++)
{
#pragma omp target teams distribute thread_limit(64)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
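/* A hedged validation sketch (not part of PolyBench): a plain sequential
   3mm reference against which the offloaded kernel above can be checked.
   The name kernel_3mm_seq and its use are assumptions for illustration. */
static
void kernel_3mm_seq(int ni, int nj, int nk, int nl, int nm,
                    DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
                    DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                    DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                    DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
                    DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
                    DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
                    DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j, k;
  /* E := A*B */
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      E[i][j] = 0;
      for (k = 0; k < nk; ++k)
        E[i][j] += A[i][k] * B[k][j];
    }
  /* F := C*D */
  for (i = 0; i < nj; i++)
    for (j = 0; j < nl; j++) {
      F[i][j] = 0;
      for (k = 0; k < nm; ++k)
        F[i][j] += C[i][k] * D[k][j];
    }
  /* G := E*F */
  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      G[i][j] = 0;
      for (k = 0; k < nj; ++k)
        G[i][j] += E[i][k] * F[k][j];
    }
}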
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
tng_parallel_read.c | #ifdef TNG_BUILD_OPENMP_EXAMPLES
/* This code is part of the tng binary trajectory format.
*
* Written by Magnus Lundborg
* Copyright (c) 2012-2013, The GROMACS development team.
* Check out http://www.gromacs.org for more information.
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the Revised BSD License.
*/
#include "tng/tng_io.h"
#include <stdlib.h>
#include <stdio.h>
/* N.B. this code is for testing parallel reading of trajectory frame sets. The
* performance is not improved very much and is to a large extent limited by
* disk i/o. It can however be used as inspiration for writing parallel code
* using the TNG library. The code is NOT fully tested and may behave strangely. */
int main(int argc, char **argv)
{
tng_trajectory_t traj, local_traj = 0;
union data_values ***local_positions = 0; // A 3-dimensional array to be populated
union data_values **particle_pos = 0;
int64_t n_particles, n_values_per_frame, n_frame_sets, n_frames;
int64_t n_frames_per_frame_set, tot_n_frames = 0;
char data_type;
int i, j, fail;
int64_t particle = 0, local_first_frame, local_last_frame;
char atom_name[64], res_name[64];
tng_trajectory_frame_set_t frame_set = 0;
if(argc <= 1)
{
printf("No file specified\n");
printf("Usage:\n");
printf("tng_parallel_read <tng_file> [particle number = %"PRId64"]\n",
particle);
exit(1);
}
// A reference must be passed to allocate memory
if(tng_trajectory_init(&traj) != TNG_SUCCESS)
{
tng_trajectory_destroy(&traj);
exit(1);
}
tng_input_file_set(traj, argv[1]);
tng_current_frame_set_get(traj, &frame_set);
// Read the file headers
tng_file_headers_read(traj, TNG_USE_HASH);
if(argc >= 3)
{
particle = strtoll(argv[2], 0, 10);
}
tng_num_frame_sets_get(traj, &n_frame_sets);
tng_num_frames_per_frame_set_get(traj, &n_frames_per_frame_set);
particle_pos = malloc(sizeof(union data_values *) * n_frame_sets *
n_frames_per_frame_set);
for(i = n_frame_sets * n_frames_per_frame_set; i--;)
{
/* Assume 3 values per frame even if it's not determined yet */
particle_pos[i] = malloc(sizeof(union data_values) * 3);
}
printf("%"PRId64" frame sets\n", n_frame_sets);
if(tng_atom_name_of_particle_nr_get(traj, particle, atom_name,
sizeof(atom_name)) ==
TNG_SUCCESS &&
tng_residue_name_of_particle_nr_get(traj, particle, res_name,
sizeof(res_name)) ==
TNG_SUCCESS)
{
printf("Particle: %s (%s)\n", atom_name, res_name);
}
else
{
printf("Particle name not found\n");
}
fail = 0;
#pragma omp parallel \
private (n_frames, n_particles, n_values_per_frame, \
local_first_frame, local_last_frame, j, fail) \
firstprivate (local_traj, local_positions, frame_set)\
shared(data_type, traj, n_frame_sets, particle_pos, particle, i, tot_n_frames)\
default(none)
{
/* Each tng_trajectory_t keeps its own file pointers and i/o positions.
* Therefore there must be a copy for each thread. */
tng_trajectory_init_from_src(traj, &local_traj);
#pragma omp for
for(i = 0; i < n_frame_sets; i++)
{
if(tng_frame_set_nr_find(local_traj, i) != TNG_SUCCESS)
{
printf("FAILED finding frame set %d!\n", i);
tot_n_frames = 0;
fail = 1;
}
if(tng_particle_data_get(local_traj, TNG_TRAJ_POSITIONS, &local_positions,
&n_frames, &n_particles, &n_values_per_frame,
&data_type) != TNG_SUCCESS)
{
printf("FAILED getting particle data\n");
tot_n_frames = 0;
fail = 1;
}
if(!fail)
{
tng_current_frame_set_get(local_traj, &frame_set);
tng_frame_set_frame_range_get(local_traj, frame_set, &local_first_frame, &local_last_frame);
// printf("Frame %"PRId64"-%"PRId64":\n", local_first_frame, local_last_frame);
// printf("%"PRId64" %"PRId64" %"PRId64"\n", n_frames, n_particles, n_values_per_frame);
tot_n_frames += n_frames;
for(j = 0; j < n_frames; j++)
{
particle_pos[local_first_frame + j][0] = local_positions[j][particle][0];
particle_pos[local_first_frame + j][1] = local_positions[j][particle][1];
particle_pos[local_first_frame + j][2] = local_positions[j][particle][2];
}
}
}
// Free memory
if(local_positions)
{
tng_particle_data_values_free(local_traj, local_positions, n_frames, n_particles,
n_values_per_frame, data_type);
}
tng_trajectory_destroy(&local_traj);
}
switch(data_type)
{
case TNG_INT_DATA:
for(j = 0; j < tot_n_frames; j++)
{
printf("\t%"PRId64"\t%"PRId64"\t%"PRId64"\n", particle_pos[j][0].i,
particle_pos[j][1].i, particle_pos[j][2].i);
}
break;
case TNG_FLOAT_DATA:
for(j = 0; j < tot_n_frames; j++)
{
printf("\t%f\t%f\t%f\n", particle_pos[j][0].f,
particle_pos[j][1].f, particle_pos[j][2].f);
}
break;
case TNG_DOUBLE_DATA:
for(j = 0; j < tot_n_frames; j++)
{
printf("\t%f\t%f\t%f\n", particle_pos[j][0].d,
particle_pos[j][1].d, particle_pos[j][2].d);
}
break;
default:
break;
}
/* Free more memory */
for(i = n_frame_sets * n_frames_per_frame_set; i--;)
{
free(particle_pos[i]);
}
free(particle_pos);
tng_trajectory_destroy(&traj);
return(0);
}
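/* Hedged sketch of the per-thread copy pattern used above, distilled for
 * reference; the calls are the TNG functions already used in this file,
 * but the fragment itself is illustrative only. Each OpenMP thread clones
 * the master tng_trajectory_t so that file handles and i/o positions are
 * never shared between threads. */
#if 0
#pragma omp parallel firstprivate(local_traj)
{
    tng_trajectory_init_from_src(traj, &local_traj);  /* private copy */
    /* ... per-thread reads against local_traj ... */
    tng_trajectory_destroy(&local_traj);              /* per-thread cleanup */
}
#endif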
#endif
|
demag_full.c | #include "clib.h"
#include "math.h"
#include "stdlib.h"
void demag_full(double *restrict spin, double *restrict field, double *restrict energy, double *restrict coords,
double *restrict mu_s, double *restrict mu_s_scale, int n) {
    /* Full calculation of the demagnetising field for atomistic systems.
     * The main idea is to iterate through every lattice site and sum
     * the dipolar contribution of the whole system. This is not the most
     * efficient approach since it needs N^2 calculations, but it is a
     * reliable reference to compare against other approximations or techniques
*
* Thus, for the i-th spin, the field is calculated as:
*
* ^ ^ ^ ^
* -> mu0 mu_s __ 3 r_ij ( m_j \cdot r_ij ) - m_j
* H_i = -------- \ -------------------------------
* 4 pi /__ r_ij ^ 3
*
* i != j
*
* where the numerator has unit vectors.
* r_ij is a vector from r_i to r_j , i.e. r_ij = r_j - r_i
*
* The prefactor for every lattice site, is stored in the mu_s_scale
* array, since mu_s will depend on the site (different or no material)
*
* The spin and field are vector fields, thus they have 3 * n entries,
* where n is the number of lattice sites of the system (including the
* ones with mu_s = 0)
* The coords array also has 3 * n entries since it has the coordinates
* for every point, in the order: x0 y0 z0 x1 y1 z1 x2 ...
*
* mu_s has the magnetic moments magnitudes and it is used for computing
* the energy density of the i-th spin as:
*
* mu_s __ ^ ->
* E_i = - -- \ m_i \cdot H_i
* 2 /__
*
* i=x,y,z
*
* the 1/2 factor is for the double counting.
*
*/
/* rij for the distance vector and rij_n for the normalised
* version of the vector. rij_mag is the magnitude */
double rij[3];
double rij_n[3];
/* we start iterating through every lattice site */
#pragma omp parallel for private(rij, rij_n)
for (int i = 0; i < n; i++) {
double rij_mag;
/* This is for the dot product of m and rij */
double mrij = 0;
/* Reset the field values */
for (int k = 0; k < 3; k++) field[3 * i + k] = 0;
/* Now we iterate through every spin of the system excluding
* the point where we are right now */
for (int j = 0; j < n; j++) {
/* Avoid sites with mu_s = 0 */
if(j != i && mu_s_scale[j] != 0.){
/* Compute the distance from r_i to r_j and normalise */
for(int k = 0; k < 3; k++) {
rij[k] = coords[3 * j + k] - coords[3 * i + k];
}
rij_mag = sqrt(rij[0] * rij[0] +
rij[1] * rij[1] +
rij[2] * rij[2]);
for(int k = 0; k < 3; k++) rij_n[k] = rij[k] / rij_mag;
/* dot product of m_j and r_ij (normalised)
* Remember that m has the structure: mx0 my0 mz0 mx1 my1 ...
*/
mrij = spin[3 * j] * rij_n[0] + spin[3 * j + 1] * rij_n[1]
+ spin[3 * j + 2] * rij_n[2] ;
/* Now add the contribution of the j-th spin */
for(int k = 0; k < 3; k++){
field[3 * i + k] += (3 * rij_n[k] * mrij - spin[3 * j + k])
/ (rij_mag * rij_mag * rij_mag) ;
}
}
}
/* Now we scale the total contribution to the i-th spin */
for(int k = 0; k < 3; k++) field[3 * i + k] *= mu_s_scale[i];
/* And compute the energy avoiding double counting */
energy[i] = -0.5 * mu_s[i] * (field[3 * i] * spin[3 * i] +
field[3 * i + 1] * spin[3 * i + 1] +
field[3 * i + 2] * spin[3 * i + 2]);
}
}
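/* Hedged usage sketch (not part of the library): a two-spin system one
 * lattice unit apart on the x axis, both moments along z; all values are
 * illustrative. For spin 0 the dot product m_j . r_ij is zero, so the
 * dipolar field reduces to -m_j / r^3, i.e. field[2] == -1 here. */
#if 0
    double spin[6]   = {0, 0, 1,   0, 0, 1};   /* unit moments along z */
    double coords[6] = {0, 0, 0,   1, 0, 0};   /* sites at x = 0 and x = 1 */
    double field[6]  = {0}, energy[2] = {0};
    double mu_s[2]       = {1., 1.};
    double mu_s_scale[2] = {1., 1.};
    demag_full(spin, field, energy, coords, mu_s, mu_s_scale, 2);
#endif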
|
geli_fmt_plug.c | /*
* JtR format to crack password protected FreeBSD GELI volumes.
*
* This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
* is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_geli;
#elif FMT_REGISTERS_H
john_register_one(&fmt_geli);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "hmac_sha.h"
#include "aes.h"
#include "pbkdf2_hmac_sha512.h"
#include "jumbo.h"
#include "memdbg.h"
#include "geli_common.h"
#define FORMAT_LABEL "geli"
#define FORMAT_NAME "FreeBSD GELI"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA512 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;
static custom_salt *cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
cracked_count = self->params.max_keys_per_crypt;
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static void set_salt(void *salt)
{
cur_salt = (custom_salt *)salt;
}
static void geli_set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
static char *get_key(int index)
{
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
memset(cracked, 0, sizeof(cracked[0])*cracked_count);
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
unsigned char master[MAX_KEYS_PER_CRYPT][G_ELI_USERKEYLEN];
unsigned char key[MAX_KEYS_PER_CRYPT][G_ELI_USERKEYLEN];
int i;
#ifdef SIMD_COEF_64
int lens[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
pout[i] = master[i];
}
pbkdf2_sha512_sse((const unsigned char**)pin, lens, cur_salt->md_salt, G_ELI_SALTLEN, cur_salt->md_iterations, pout, G_ELI_USERKEYLEN, 0);
#else
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
pbkdf2_sha512((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->md_salt, G_ELI_SALTLEN, cur_salt->md_iterations, master[i], G_ELI_USERKEYLEN, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
JTR_hmac_sha512((const unsigned char*)"", 0, master[i], G_ELI_USERKEYLEN, key[i], G_ELI_USERKEYLEN);
cracked[index+i] = geli_decrypt_verify(cur_salt, key[i]);
}
}
return count;
}
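/* Hedged sketch of the key derivation chain implemented by crypt_all()
 * above, written out for a single candidate. It reuses only functions
 * already included by this file; the wrapper name geli_derive_user_key
 * is hypothetical. */
#if 0
static void geli_derive_user_key(const char *password, custom_salt *cs,
                                 unsigned char key[G_ELI_USERKEYLEN])
{
	unsigned char master[G_ELI_USERKEYLEN];

	/* 1. PBKDF2-HMAC-SHA512 over the on-disk salt */
	pbkdf2_sha512((unsigned char *)password, strlen(password),
	              cs->md_salt, G_ELI_SALTLEN, cs->md_iterations,
	              master, G_ELI_USERKEYLEN, 0);
	/* 2. HMAC-SHA512 with an empty key finalises the user key */
	JTR_hmac_sha512((const unsigned char *)"", 0, master,
	                G_ELI_USERKEYLEN, key, G_ELI_USERKEYLEN);
}
#endif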
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_geli = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
geli_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
geli_common_valid,
fmt_default_split,
fmt_default_binary,
geli_common_get_salt,
{
geli_common_iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
geli_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
//Index nc = blocking.nc(); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
std::size_t sizeA = kc*mc;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
RhsScalar* blockB = blocking.blockB();
eigen_internal_assert(blockB!=0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing A'.
pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
// Pack B_k to B' in a parallel fashion:
// each thread packs the sub block B_k,j to B'_j where j is the thread id.
// However, before copying to B'_j, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
// Notify the other threads that the part B'_j is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per B'_j
for(Index shift=0; shift<threads; ++shift)
{
Index j = (tid+shift)%threads;
// At this point we have to make sure that B'_j has been updated by the thread j,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if(shift>0)
while(info[j].sync!=k) {}
gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
}
// Then keep going as usual with the remaining A'
for(Index i=mc; i<rows; i+=mc)
{
const Index actual_mc = (std::min)(i+mc,rows)-i;
// pack A_i,k to A'
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
// C_i += A' * B'
gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
}
// Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index j=0; j<threads; ++j)
#pragma omp atomic
--(info[j].users);
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1)
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack rhs's panel into a sequential chunk of memory (L2 caching)
// Note that this panel will be read as many times as the number of blocks in the lhs's
// vertical panel which is, in practice, a very low number.
pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
// For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1)
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
// We pack the lhs's block into a sequential chunk of memory (L1 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro vertical panel of the large rhs's panel (e.g., cols/4 times).
pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
// Everything is packed, we can now call the block * panel kernel:
gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
}
}
}
}
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
: traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession() const
{
m_blocking.allocateB();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
/*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
/*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
RhsScalar* m_blockW;
DenseIndex m_mc;
DenseIndex m_nc;
DenseIndex m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline DenseIndex mc() const { return m_mc; }
inline DenseIndex nc() const { return m_nc; }
inline DenseIndex kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
inline RhsScalar* blockW() { return m_blockW; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, true>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth,
SizeW = MaxDepth * Traits::WorkSpaceFactor
};
EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
public:
gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
this->m_blockW = m_staticW;
}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateW() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
DenseIndex m_sizeA;
DenseIndex m_sizeB;
DenseIndex m_sizeW;
public:
gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
computeProductBlockingSizes<LhsScalar,RhsScalar>(this->m_kc, this->m_mc, this->m_nc);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateW()
{
if(this->m_blockW==0)
this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
}
void allocateAll()
{
allocateA();
allocateB();
allocateW();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
aligned_delete(this->m_blockW, m_sizeW);
}
};
} // end namespace internal
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef Scalar ResScalar;
GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{
typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
}
template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
_ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
}
};
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
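// Hedged usage sketch (illustrative only, not part of Eigen itself): with
// matrices large enough to select the GemmProduct path, a plain product
// expression dispatches to the blocked (and, when OpenMP is enabled,
// parallel) kernel implemented above. Sizes here are arbitrary.
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
//   Eigen::MatrixXd B = Eigen::MatrixXd::Random(512, 512);
//   Eigen::MatrixXd C = 2.0 * A * B;  // runs scaleAndAddTo() with alpha = 2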
|
openmp.c | #include <stdio.h>
#include <omp.h>
#define size 32
#define N 5
#define r 0.1
#define value 32
//size is the grid size and N is the number of time steps to calculate.
//For stability of this explicit scheme, r must satisfy r <= 0.25.
int main(){
omp_set_num_threads(value);
float u_odd [size+2][size+2];
float u_even [size+2][size+2];
#pragma omp parallel for
for(int i = 0; i < size+2; i++){
for(int j = 0; j < size+2; j++){
u_even[i][j] = 0;
u_odd[i][j] = 0;
}
}
//initialize u_even
#pragma omp parallel for
for(int i = 1; i < size+1; i++){
for(int j = 1; j < size+1; j++){
u_even[i][j] = 1;
}
}
for(int n = 0; n < N; n++){
if (n%2 == 0){
#pragma omp parallel for
for(int i = 1 ; i < size+1; i++){
for(int j = 1; j < size+1; j++){
u_odd[i][j] = (1-4*r)*u_even[i][j] + r*(u_even[i+1][j]+u_even[i-1][j]+u_even[i][j+1] + u_even[i][j-1]);
}
}
}
else {
#pragma omp parallel for
for(int i = 1 ; i < size+1; i++){
for(int j = 1; j < size+1; j++){
u_even[i][j] = (1-4*r)*u_odd[i][j] + r*(u_odd[i+1][j]+u_odd[i-1][j]+u_odd[i][j+1] + u_odd[i][j-1]);
}
}
}
}
//after N steps the latest values are in u_odd when N is odd (the final
//update wrote u_odd), otherwise in u_even; printing stays sequential so
//the rows come out in order
float (*u_final)[size+2] = (N % 2 == 1) ? u_odd : u_even;
for(int i = 1; i < size+1; i++){
for(int j = 1; j < size+1; j++){
printf("%f ",u_final[i][j]);
}
printf("\n");
}
return 0;
}
|
pysnobal.h | /*
** NAME
**	pysnobal.h
**
** DESCRIPTION
** The include file for 'isnobal'.
*/
#ifndef _ISNOBAL_H_
#define _ISNOBAL_H_
#define DEFAULT_Z_U 5.0 /* default wind speed measurement height */
#define DEFAULT_Z_T 5.0 /* default air temp and vapor press hght */
#define IBANDS 6 /* # bands in input image */
#define EMBANDS 10 /* # bands in energy/mass output image */
#define SBANDS 9 /* # bands in snow output image */
#define PBANDS 4 /* # bands in precip image */
#define ICBANDS 7 /* # bands in initial conditions image */
#define ICBANDS_RESTART 8 /* # bands in init cond image (restart) */
#define TBANDS 17 /* # bands in temporary results file */
#define NO_DATA -999999 /* output value for masked pnt (no data) */
typedef struct {
int masked;
double current_time;
double time_since_out;
double elevation;
double z_0;
double rho;
double T_s_0;
double T_s_l;
double T_s;
double h2o_sat;
double h2o_max;
double h2o;
double h2o_vol;
double h2o_total;
int layer_count;
double cc_s_0;
double cc_s_l;
double cc_s;
double m_s_0;
double m_s_l;
double m_s;
double z_s_0;
double z_s_l;
double z_s;
double R_n_bar;
double H_bar;
double L_v_E_bar;
double G_bar;
double G_0_bar;
double M_bar;
double delta_Q_bar;
double delta_Q_0_bar;
double E_s_sum;
double melt_sum;
double ro_pred_sum;
} OUTPUT_REC;
//typedef OUTPUT_REC *out_p;
//extern OUTPUT_REC output_rec[100]; /* output data structure */
typedef struct {
int* masked;
double* current_time;
double* time_since_out;
double* elevation;
double* z_0;
double* rho;
double* T_s_0;
double* T_s_l;
double* T_s;
double* h2o_sat;
double* h2o_max;
double* h2o_vol;
double* h2o;
double* h2o_total;
int* layer_count;
double* cc_s_0;
double* cc_s_l;
double* cc_s;
double* m_s_0;
double* m_s_l;
double* m_s;
double* z_s_0;
double* z_s_l;
double* z_s;
double* R_n_bar;
double* H_bar;
double* L_v_E_bar;
double* G_bar;
double* G_0_bar;
double* M_bar;
double* delta_Q_bar;
double* delta_Q_0_bar;
double* E_s_sum;
double* melt_sum;
double* ro_pred_sum;
} OUTPUT_REC_ARR;
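/*
 * OUTPUT_REC_ARR appears to be the structure-of-arrays counterpart of
 * OUTPUT_REC above: one parallel array per field, indexed by grid point.
 * This is the layout call_snobal() below takes, which keeps per-field
 * accesses contiguous across points when looping in parallel.
 */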
typedef struct {
double* S_n;
double* I_lw;
double* T_a;
double* e_a;
double* u;
double* T_g;
double* m_pp;
double* percent_snow;
double* rho_snow;
double* T_pp;
} INPUT_REC_ARR;
typedef struct {
double z_u;
double z_T;
double z_g;
int relative_heights;
double max_h2o_vol;
double max_z_s_0;
} PARAMS;
/* ------------------------------------------------------------------------- */
/*
* Routines that are part of isnobal program.
*/
//extern int call_snobal(int N, int nthreads, int first_step, TSTEP_REC tstep_info[4], OUTPUT_REC** output_rec, INPUT_REC_ARR* input1, INPUT_REC_ARR* input2, PARAMS params, OUTPUT_REC_ARR* output1);
extern int call_snobal(int N, int nthreads, int first_step, TSTEP_REC tstep_info[4], INPUT_REC_ARR* input1, INPUT_REC_ARR* input2, PARAMS params, OUTPUT_REC_ARR* output1);
//extern void assign_buffers (int masked, int n, int output, OUTPUT_REC **output_rec);
//extern void buffers (void);
//extern void check_range (int index, double value, double min, double max,
// char * descrip, bool_t print_line_samp);
//extern void check_units (LQH_T **lq_headers, UNITS_T *units, int nbands,
// int fd);
//extern void copy_image (char *tempfile, int nbands, fpixel_t * buf,
// int fdo);
//extern void e_m_image (int step, OUTPUT_REC **output_rec, int nbits);
//extern bool_t extract_data (bool_t first_step, int n, bool_t sun_up[], OUTPUT_REC **output_rec);
//extern void headers (void);
//extern void isnobal (int out_step, int nthreads, int dynamic_teams, int got_opt_F, int verbose, int nbits);
///*extern void isnobal (int out_step);*/
//extern void newlqh (int fdo, int nbands, fpixel_t *mins,
// fpixel_t *maxs, char **units);
//extern int open_input (char *prefix, int index, bool_t *sun_up);
//extern int output_image (char * filename, int nbands, char ** units,
// char ** annots, fpixel_t * mins,
// fpixel_t * maxs, int nbits);
//extern bool_t precip_event (float curr_time, char *pre_img);
//extern void precip_hdrs (char *filename);
//extern void read_data (int first_step);
//extern void snow_image (int step, OUTPUT_REC **output_rec, int nbits);
//extern void temp_filename (char *prefix, char *filename);
//extern void warn_range (int index, double value, double min, double max,
// char * descrip, bool_t print_line_samp);
//extern void write_data (int output, int last_step);
/* ------------------------------------------------------------------------- */
/*
* Global variables internal to isnobal program.
*/
extern int units_warn; /* check units in input images? */
extern char *compress_cmd; /* shell command to compress images */
/* timesteps and indices */
extern int start_step; /* index of first timestep */
extern int nstep; /* # of data timesteps */
extern int nDigits; /* # of digits in suffixes of images*/
extern bool_t restart; /* restart flag */
/* model variables */
extern double elevation;
//#pragma omp threadprivate(elevation)
#endif /* _ISNOBAL_H_ */
|
pooling_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void pooling3x3s2_max_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
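// tailstep moves the three row pointers from the end of one output row to
// the start of the next: after outw outputs each pointer has advanced
// 2*outw pack4 pixels (stride 2), so (w - 2*outw) skips the rest of the
// current input row and the extra + w skips one full row, since the 3x3
// window also steps down by 2 rows. The *4 converts pack4 pixels to floats.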
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"fmax v16.4s, v0.4s, v1.4s \n"
"fmax v17.4s, v2.4s, v3.4s \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"fmax v18.4s, v4.4s, v5.4s \n"
"fmax v19.4s, v6.4s, v7.4s \n"
"ld1 {v8.4s}, [%1] \n"
"fmax v20.4s, v16.4s, v2.4s \n"
"fmax v21.4s, v17.4s, v4.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"fmax v22.4s, v18.4s, v6.4s \n"
"fmax v23.4s, v19.4s, v8.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"
"fmax v16.4s, v0.4s, v1.4s \n"
"fmax v17.4s, v2.4s, v3.4s \n"
"fmax v18.4s, v4.4s, v5.4s \n"
"fmax v19.4s, v6.4s, v7.4s \n"
"ld1 {v8.4s}, [%2] \n"
"fmax v24.4s, v16.4s, v2.4s \n"
"fmax v25.4s, v17.4s, v4.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmax v26.4s, v18.4s, v6.4s \n"
"fmax v27.4s, v19.4s, v8.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"
"fmax v16.4s, v0.4s, v1.4s \n"
"fmax v17.4s, v2.4s, v3.4s \n"
"fmax v18.4s, v4.4s, v5.4s \n"
"fmax v19.4s, v6.4s, v7.4s \n"
"ld1 {v8.4s}, [%3] \n"
"fmax v28.4s, v16.4s, v2.4s \n"
"fmax v29.4s, v17.4s, v4.4s \n"
"fmax v30.4s, v18.4s, v6.4s \n"
"fmax v31.4s, v19.4s, v8.4s \n"
"fmax v20.4s, v20.4s, v24.4s \n"
"fmax v21.4s, v21.4s, v25.4s \n"
"fmax v22.4s, v22.4s, v26.4s \n"
"fmax v23.4s, v23.4s, v27.4s \n"
"fmax v20.4s, v20.4s, v28.4s \n"
"fmax v21.4s, v21.4s, v29.4s \n"
"fmax v22.4s, v22.4s, v30.4s \n"
"fmax v23.4s, v23.4s, v31.4s \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n"
"vmax.f32 q0, q0, q4 \n"
"vmax.f32 q1, q1, q5 \n"
"pld [%3, #512] \n"
"vldm %3!, {d16-d23} \n"
"vmax.f32 q2, q2, q6 \n"
"vmax.f32 q3, q3, q7 \n"
"vmax.f32 q0, q0, q8 \n"
"vmax.f32 q1, q1, q9 \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
"vmax.f32 q2, q2, q10 \n"
"vmax.f32 q3, q3, q11 \n"
"pld [%2, #512] \n"
"vldm %2!, {d16-d23} \n"
"vmax.f32 q4, q4, q8 \n"
"vmax.f32 q5, q5, q9 \n"
"pld [%3, #512] \n"
"vldm %3!, {d24-d31} \n"
"vmax.f32 q6, q6, q10 \n"
"vmax.f32 q7, q7, q11 \n"
"vmax.f32 q4, q4, q12 \n"
"vmax.f32 q5, q5, q13 \n"
"vld1.f32 {d24-d25}, [%1 :128] \n"
"vld1.f32 {d26-d27}, [%2 :128] \n"
"vmax.f32 q6, q6, q14 \n"
"vmax.f32 q7, q7, q15 \n"
"vld1.f32 {d28-d29}, [%3 :128] \n"
"vmax.f32 q8, q12, q13 \n"
"vmax.f32 q8, q8, q14 \n"
"vmax.f32 q12, q0, q1 \n"
"vmax.f32 q13, q2, q3 \n"
"vmax.f32 q14, q4, q5 \n"
"vmax.f32 q15, q6, q7 \n"
"vmax.f32 q12, q12, q2 \n"
"vmax.f32 q13, q13, q4 \n"
"vmax.f32 q14, q14, q6 \n"
"vmax.f32 q15, q15, q8 \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"
"fmax v16.4s, v0.4s, v4.4s \n"
"fmax v17.4s, v1.4s, v5.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%3], #64 \n"
"fmax v18.4s, v2.4s, v6.4s \n"
"fmax v19.4s, v3.4s, v7.4s \n"
"ld1 {v0.4s}, [%1] \n"
"fmax v16.4s, v16.4s, v20.4s \n"
"fmax v17.4s, v17.4s, v21.4s \n"
"ld1 {v1.4s}, [%2] \n"
"fmax v18.4s, v18.4s, v22.4s \n"
"fmax v19.4s, v19.4s, v23.4s \n"
"ld1 {v2.4s}, [%3] \n"
"fmax v3.4s, v0.4s, v1.4s \n"
"fmax v20.4s, v16.4s, v17.4s \n"
"fmax v21.4s, v18.4s, v19.4s \n"
"fmax v3.4s, v3.4s, v2.4s \n"
"fmax v20.4s, v20.4s, v18.4s \n"
"fmax v21.4s, v21.4s, v3.4s \n"
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n"
"vmax.f32 q12, q0, q4 \n"
"vmax.f32 q13, q1, q5 \n"
"pld [%3, #512] \n"
"vldm %3!, {d16-d23} \n"
"vmax.f32 q14, q2, q6 \n"
"vmax.f32 q15, q3, q7 \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vmax.f32 q12, q12, q8 \n"
"vmax.f32 q13, q13, q9 \n"
"vld1.f32 {d2-d3}, [%2 :128] \n"
"vmax.f32 q14, q14, q10 \n"
"vmax.f32 q15, q15, q11 \n"
"vld1.f32 {d4-d5}, [%3 :128] \n"
"vmax.f32 q3, q0, q1 \n"
"vmax.f32 q4, q12, q13 \n"
"vmax.f32 q5, q14, q15 \n"
"vmax.f32 q3, q3, q2 \n"
"vmax.f32 q4, q4, q14 \n"
"vmax.f32 q5, q5, q3 \n"
"vst1.f32 {d8-d11}, [%0 :128]! \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _max0 = vmaxq_f32(vmaxq_f32(_r00, _r01), _r02);
float32x4_t _max1 = vmaxq_f32(vmaxq_f32(_r10, _r11), _r12);
float32x4_t _max2 = vmaxq_f32(vmaxq_f32(_r20, _r21), _r22);
float32x4_t _max = vmaxq_f32(vmaxq_f32(_max0, _max1), _max2);
vst1q_f32(outptr, _max);
r0 += 8;
r1 += 8;
r2 += 8;
outptr += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
amdgcn-openmp-device-math-complex.c | // RUN: %clang_cc1 -internal-isystem %S/Inputs/include -x c -fopenmp -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-host.bc
// RUN: %clang_cc1 -internal-isystem %S/../../lib/Headers/openmp_wrappers -include __clang_openmp_device_functions.h -internal-isystem %S/../../lib/Headers/openmp_wrappers -internal-isystem %S/Inputs/include -x c -fopenmp -triple amdgcn-amd-amdhsa -aux-triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-host.bc -o - | FileCheck %s --check-prefixes=CHECK
#include <complex.h>
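// This test verifies that complex multiplication and division inside an
// OpenMP target region for amdgcn lower to the compiler-rt style helpers
// (__muldc3/__divdc3 for double, __mulsc3/__divsc3 for float), and that
// those helpers are themselves built on the OCML device-math calls matched
// by the CHECK-DAG lines below.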
void test_complex_f64(double _Complex a) {
// CHECK-LABEL: define {{.*}}test_complex_f64
#pragma omp target
{
// CHECK: call { double, double } @__divdc3
// CHECK: call { double, double } @__muldc3
(void)(a * (a / a));
}
}
// CHECK: define weak {{.*}} @__divdc3
// CHECK-DAG: call double @__ocml_fabs_f64(
// CHECK-DAG: call i32 @__ocml_isnan_f64(
// CHECK-DAG: call i32 @__ocml_isfinite_f64(
// CHECK-DAG: call double @__ocml_copysign_f64(
// CHECK-DAG: call double @__ocml_scalbn_f64(
// CHECK-DAG: call double @__ocml_logb_f64(
// CHECK: define weak {{.*}} @__muldc3
// CHECK-DAG: call i32 @__ocml_isnan_f64(
// CHECK-DAG: call i32 @__ocml_isinf_f64(
// CHECK-DAG: call double @__ocml_copysign_f64(
void test_complex_f32(float _Complex a) {
// CHECK-LABEL: define {{.*}}test_complex_f32
#pragma omp target
{
// CHECK: call [2 x i32] @__divsc3
// CHECK: call [2 x i32] @__mulsc3
(void)(a * (a / a));
}
}
// CHECK: define weak {{.*}} @__divsc3
// CHECK-DAG: call float @__ocml_fabs_f32(
// CHECK-DAG: call i32 @__ocml_isnan_f32(
// CHECK-DAG: call i32 @__ocml_isfinite_f32(
// CHECK-DAG: call float @__ocml_copysign_f32(
// CHECK-DAG: call float @__ocml_scalbn_f32(
// CHECK-DAG: call float @__ocml_logb_f32(
// CHECK: define weak {{.*}} @__mulsc3
// CHECK-DAG: call i32 @__ocml_isnan_f32(
// CHECK-DAG: call i32 @__ocml_isinf_f32(
// CHECK-DAG: call float @__ocml_copysign_f32(
|
main.c | #include<stdio.h>
#include<stdlib.h>
#include <sys/time.h>
#include<omp.h>
void merge(int l1, int r1, int r2, int* data, int* temp) {
int top = l1, p = l1, q = r1;
while (p < r1 || q < r2) {
if (q >= r2 || (p < r1 && data[p] <= data[q])) {
temp[top++] = data[p++];
}
else {
temp[top++] = data[q++];
}
}
for (top = l1; top < r2; top++) {
data[top] = temp[top];
}
}
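/*
 * merge_sort is a bottom-up (iterative) parallel merge sort: the first
 * parallel loop sorts adjacent pairs in place, then each outer iteration
 * doubles the run width i and merges neighbouring runs [j, j+i) and
 * [j+i, j+2i) in parallel. Merges at the same width are independent, so
 * the inner loop parallelizes cleanly; only the log2(N) width levels run
 * sequentially.
 */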
void merge_sort(int l, int r, int* data, int N) {
int i, j, t, * temp;
temp = (int*)malloc(N * sizeof(int));
#pragma omp parallel for private(i, t) shared(N, data)
for (i = 0; i < N / 2; i++) {
if (data[i * 2] > data[i * 2 + 1]) {
t = data[i * 2];
data[i * 2] = data[i * 2 + 1];
data[i * 2 + 1] = t;
}
//printf("Hello world from #%d!\n", omp_get_thread_num());
}
for (i = 2; i < r; i *= 2) {
#pragma omp parallel for private(j) shared(r, i)
for (j = 0; j < r - i; j += i * 2) {
merge(j, j + i, (j + i * 2 < r ? j + i * 2 : r), data, temp);
//printf("Hello world from #%d!\n", omp_get_thread_num());
}
}
free(temp);
}
int main()
{
float time_use=0; // elapsed wall-clock time in milliseconds
struct timeval start;
struct timeval end;
int len = 1000000; // number of elements to sort
int num_threads = 64; // number of OpenMP threads
int* data = (int*)malloc(sizeof(int) * len); // allocate the array
for (int j = 0; j < len; j++) { // fill the array in descending order
data[j] = len - j;
}
omp_set_num_threads(num_threads);
gettimeofday(&start,NULL); // start timing
merge_sort(0, len, data, len);
gettimeofday(&end,NULL); // stop timing
time_use=(end.tv_sec-start.tv_sec)*1000.0+(end.tv_usec-start.tv_usec)/1000.0; // milliseconds
/*for (int i = 0; i < 100;i++) {
printf("%d \n",data[i]);
}*/
//printf("Serial program sort 512M Data, time usage is %.4f ms .\n",time_use);
printf("64 threads parallel program sort 1M Data, time usage is %.4f ms .\n",time_use);
}
|
par_multi_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBuildMultipass
* This routine implements Stuben's direct interpolation with multiple passes.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int P_max_elmts,
HYPRE_Int weight_option,
hypre_ParCSRMatrix **P_ptr )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_ParCSRCommPkg *tmp_comm_pkg;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = NULL;
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = NULL;
//HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = NULL;
/*HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
HYPRE_BigInt *col_map_offd = NULL;*/
HYPRE_Int num_cols_offd;
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row,
finally will be pointer to start of row */
HYPRE_Int *P_diag_j;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row,
finally will be pointer to start of row */
HYPRE_Int *P_offd_j = NULL;
HYPRE_Int num_sends = 0;
HYPRE_Int *int_buf_data = NULL;
HYPRE_BigInt *big_buf_data = NULL;
HYPRE_Int *send_map_start;
HYPRE_Int *send_map_elmt;
HYPRE_Int *send_procs;
HYPRE_Int num_recvs = 0;
HYPRE_Int *recv_vec_start;
HYPRE_Int *recv_procs;
HYPRE_Int *new_recv_vec_start = NULL;
HYPRE_Int **Pext_send_map_start = NULL;
HYPRE_Int **Pext_recv_vec_start = NULL;
HYPRE_Int *Pext_start = NULL;
HYPRE_Int *P_ncols = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
HYPRE_Int *P_marker;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *C_array;
HYPRE_Int *C_array_offd = NULL;
HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */
HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first
point of pass j contained in pass_array */
HYPRE_Int *P_diag_start;
HYPRE_Int *P_offd_start = NULL;
HYPRE_Int **P_diag_pass;
HYPRE_Int **P_offd_pass = NULL;
HYPRE_Int **Pext_pass = NULL;
HYPRE_BigInt *big_temp_pass = NULL;
HYPRE_BigInt **new_elmts = NULL; /* new neighbors generated in each pass */
HYPRE_Int *new_counter = NULL; /* contains no. of new neighbors for
each pass */
HYPRE_Int *loc = NULL; /* contains locations for new neighbor
connections in int_o_buffer to avoid searching */
HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero
cols of off proc neighbors */
HYPRE_BigInt *Pext_send_buffer = NULL; /* used to collect global nonzero
col ids in P_diag for send_map_elmts */
HYPRE_Int *map_S_to_new = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_Int *permute = NULL;
HYPRE_BigInt *big_permute = NULL;
HYPRE_Int cnt;
HYPRE_Int cnt_nz;
HYPRE_Int total_nz;
HYPRE_Int pass;
HYPRE_Int num_passes;
HYPRE_Int max_num_passes = 10;
HYPRE_Int n_fine;
HYPRE_Int n_coarse = 0;
HYPRE_Int n_coarse_offd = 0;
HYPRE_Int n_SF = 0;
HYPRE_Int n_SF_offd = 0;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *assigned = NULL;
HYPRE_Int *assigned_offd = NULL;
HYPRE_Real *Pext_send_data = NULL;
HYPRE_Real *Pext_data = NULL;
HYPRE_Real sum_C, sum_N;
HYPRE_Real sum_C_pos, sum_C_neg;
HYPRE_Real sum_N_pos, sum_N_neg;
HYPRE_Real diagonal;
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Int j_start;
HYPRE_Int j_end;
HYPRE_Int i,i1;
HYPRE_Int j,j1;
HYPRE_Int k,k1,k2,k3;
HYPRE_BigInt big_k1;
HYPRE_Int pass_array_size;
HYPRE_BigInt global_pass_array_size;
HYPRE_BigInt local_pass_array_size;
HYPRE_Int my_id, num_procs;
HYPRE_Int index, start;
HYPRE_BigInt my_first_cpt;
HYPRE_BigInt total_global_cpts;
HYPRE_Int p_cnt;
HYPRE_Int total_nz_offd;
HYPRE_Int cnt_nz_offd;
HYPRE_Int cnt_offd, cnt_new;
HYPRE_Int no_break;
HYPRE_Int not_found;
HYPRE_Int Pext_send_size;
HYPRE_Int Pext_recv_size;
HYPRE_Int old_Pext_send_size;
HYPRE_Int old_Pext_recv_size;
HYPRE_Int P_offd_size = 0;
HYPRE_Int local_index = -1;
HYPRE_Int new_num_cols_offd = 0;
HYPRE_Int num_cols_offd_P;
/* Threading variables */
HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop;
HYPRE_Int pass_length;
HYPRE_Int *tmp_marker, *tmp_marker_offd;
HYPRE_Int *tmp_array, *tmp_array_offd;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
HYPRE_Int * cnt_nz_per_thread;
HYPRE_Int * cnt_nz_offd_per_thread;
/* HYPRE_Real wall_time;
wall_time = hypre_MPI_Wtime(); */
/* Initialize threading variables */
max_num_threads[0] = hypre_NumThreads();
cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
for(i=0; i < max_num_threads[0]; i++)
{
cnt_nz_offd_per_thread[i] = 0;
cnt_nz_per_thread[i] = 0;
}
/*-----------------------------------------------------------------------
* Access the CSR vectors for A and S. Also get size of fine grid.
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
/* total_global_cpts = 0; */
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
}
//col_map_offd = col_map_offd_A;
num_cols_offd = num_cols_offd_A;
if (num_cols_offd_A)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
A_offd_j = hypre_CSRMatrixJ(A_offd);
}
if (num_cols_offd)
S_offd_j = hypre_CSRMatrixJ(S_offd);
n_fine = hypre_CSRMatrixNumRows(A_diag);
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
if (n_fine) fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
n_coarse = 0;
n_SF = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == 1) n_coarse++;
else if (CF_marker[i] == -3) n_SF++;
pass_array_size = n_fine-n_coarse-n_SF;
if (pass_array_size) pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size, HYPRE_MEMORY_HOST);
pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes+1, HYPRE_MEMORY_HOST);
if (n_fine) assigned = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
if (n_coarse) C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
if (send_map_start[num_sends])
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, send_map_start[num_sends], HYPRE_MEMORY_HOST);
}
}
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
int_buf_data[index++] = CF_marker[send_map_elmt[j]];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (num_functions > 1)
{
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
int_buf_data[index++] = dof_func[send_map_elmt[j]];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
}
n_coarse_offd = 0;
n_SF_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd; i++)
if (CF_marker_offd[i] == 1) n_coarse_offd++;
else if (CF_marker_offd[i] == -3) n_SF_offd++;
if (num_cols_offd)
{
assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, n_coarse_offd, HYPRE_MEMORY_HOST);
}
/*-----------------------------------------------------------------------
* First Pass: determine the maximal size of P, and elementsPerRow[i].
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Assigned points are points for which we know an interpolation
* formula already, and which are thus available to interpolate from.
* assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending
* in which pass their interpolation formula is determined.
*
* pass_array contains the points ordered according to its pass, i.e.
* | C-points | points of pass 1 | points of pass 2 | ....
* C_points are points 0 through pass_pointer[1]-1,
* points of pass k (0 < k < num_passes) are contained in points
* pass_pointer[k] through pass_pointer[k+1]-1 of pass_array .
*
* pass_array is also used to avoid going through all points for each pass,
* i.e. at the beginning it contains all points in descending order starting
* with n_fine-1. Then starting from the last point, we evaluate whether
* it is a C_point (pass 0). If it is, the point is brought to the front
* and the length of the points to be searched is shortened. This is
* done until the parameter cnt (which determines the first point of
* pass_array to be searched) becomes n_fine. Then all points have been
* assigned a pass number.
*-----------------------------------------------------------------------*/
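/*-----------------------------------------------------------------------
 * Toy example: with 6 fine points, C points {1,4}, and each point i
 * strongly coupled to i-1 and i+1, every F point {0,2,3,5} has a strong
 * C neighbor and lands in pass 1, giving pass_pointer = {0,0,4}. An F
 * point whose strong neighbors were all F points of pass 1 would instead
 * be assigned in pass 2.
 *-----------------------------------------------------------------------*/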
cnt = 0;
p_cnt = pass_array_size-1;
P_diag_i[0] = 0;
P_offd_i[0] = 0;
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt; /* this C point is assigned index
cnt on the coarse grid,
and as its column in P */
C_array[cnt++] = i;
assigned[i] = 0;
P_diag_i[i+1] = 1; /* one element in row i of P */
P_offd_i[i+1] = 0;
}
else if (CF_marker[i] == -1)
{
pass_array[p_cnt--] = i;
P_diag_i[i+1] = 0;
P_offd_i[i+1] = 0;
assigned[i] = -1;
fine_to_coarse[i] = -1;
}
else
{
P_diag_i[i+1] = 0;
P_offd_i[i+1] = 0;
assigned[i] = -1;
fine_to_coarse[i] = -1;
}
}
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{
big_buf_data[index] = (HYPRE_BigInt)fine_to_coarse[send_map_elmt[j]];
if (big_buf_data[index] > -1)
big_buf_data[index] += my_first_cpt;
index++;
}
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
new_recv_vec_start = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
if (n_coarse_offd)
C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);
cnt = 0;
new_recv_vec_start[0] = 0;
for (j = 0; j < num_recvs; j++)
{
for (i = recv_vec_start[j]; i < recv_vec_start[j+1]; i++)
{
if (CF_marker_offd[i] == 1)
{
map_S_to_new[i] = cnt;
C_array_offd[cnt] = i;
new_col_map_offd[cnt++] = fine_to_coarse_offd[i];
assigned_offd[i] = 0;
}
else
{
assigned_offd[i] = -1;
map_S_to_new[i] = -1;
}
}
new_recv_vec_start[j+1] = cnt;
}
cnt = 0;
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Mark all local neighbors of C points as 'assigned'.
*-----------------------------------------------------------------------*/
pass_pointer[0] = 0;
pass_pointer[1] = 0;
total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */
total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */
cnt = 0;
cnt_offd = 0;
cnt_nz = 0;
cnt_nz_offd = 0;
for (i = pass_array_size-1; i > cnt-1; i--)
{
i1 = pass_array[i];
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (CF_marker[j1] == 1)
{
P_diag_i[i1+1]++;
cnt_nz++;
assigned[i1] = 1;
}
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (CF_marker_offd[j1] == 1)
{
P_offd_i[i1+1]++;
cnt_nz_offd++;
assigned[i1] = 1;
}
}
if (assigned[i1] == 1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
}
}
pass_pointer[2] = cnt;
/*-----------------------------------------------------------------------
* All local neighbors are assigned, now need to exchange the boundary
* info for assigned strong neighbors.
*-----------------------------------------------------------------------*/
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{ int_buf_data[index++] = assigned[send_map_elmt[j]]; }
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
assigned_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
/*-----------------------------------------------------------------------
* Now we need to determine strong neighbors of points of pass 1, etc.
* we need to update assigned_offd after each pass
*-----------------------------------------------------------------------*/
pass = 2;
local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt);
hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT,
hypre_MPI_SUM, comm);
while (global_pass_array_size && pass < max_num_passes)
{
for (i = pass_array_size-1; i > cnt-1; i--)
{
i1 = pass_array[i];
no_break = 1;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
assigned[i1] = pass;
no_break = 0;
break;
}
}
if (no_break)
{
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
assigned[i1] = pass;
break;
}
}
}
}
/*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/
pass++;
pass_pointer[pass] = cnt;
local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt);
hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT,
hypre_MPI_SUM, comm);
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{ int_buf_data[index++] = assigned[send_map_elmt[j]]; }
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
assigned_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
}
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
num_passes = pass;
P_diag_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); /* P_diag_pass[i] will contain
all column numbers for points of pass i */
P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST);
P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); /* P_diag_start[i] contains
pointer to begin of column numbers in P_pass for point i,
P_diag_i[i+1] contains number of columns for point i */
P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
P_offd_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
if (cnt_nz_offd)
P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST);
else
P_offd_pass[1] = NULL;
new_elmts = hypre_CTAlloc(HYPRE_BigInt*, num_passes, HYPRE_MEMORY_HOST);
new_counter = hypre_CTAlloc(HYPRE_Int, num_passes+1, HYPRE_MEMORY_HOST);
new_counter[0] = 0;
new_counter[1] = n_coarse_offd;
new_num_cols_offd = n_coarse_offd;
new_elmts[0] = new_col_map_offd;
}
/*-----------------------------------------------------------------------
* Pass 1: now we consider points of pass 1, with strong C_neighbors,
*-----------------------------------------------------------------------*/
cnt_nz = 0;
cnt_nz_offd = 0;
/* JBS: Possible candidate for threading */
for (i=pass_pointer[1]; i < pass_pointer[2]; i++)
{
i1 = pass_array[i];
P_diag_start[i1] = cnt_nz;
P_offd_start[i1] = cnt_nz_offd;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (CF_marker[j1] == 1)
{ P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; }
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (CF_marker_offd[j1] == 1)
{ P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; }
}
}
total_nz += cnt_nz;
total_nz_offd += cnt_nz_offd;
if (num_procs > 1)
{
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd+1, HYPRE_MEMORY_HOST);
if (num_cols_offd) Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
if (send_map_start[num_sends])
P_ncols = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd+1; i++)
{ Pext_i[i] = 0; }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < send_map_start[num_sends]; i++)
{ P_ncols[i] = 0; }
}
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
for (pass=2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_map_start[pass] = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
Pext_send_size = 0;
Pext_send_map_start[pass][0] = 0;
for (i=0; i < num_sends; i++)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE
#endif
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1];
Pext_send_size += P_ncols[j];
}
}
Pext_send_map_start[pass][i+1] = Pext_send_size;
}
comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg,
P_ncols, &Pext_i[1]);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST);
Pext_send_buffer = hypre_CTAlloc(HYPRE_BigInt, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
}
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_buffer[cnt_offd++] = my_first_cpt
+ (HYPRE_BigInt) P_diag_pass[pass-1][k];
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
k3 = 0;
while (k3 < pass-1)
{
if (k1 < new_counter[k3+1])
{
k2 = k1-new_counter[k3];
Pext_send_buffer[cnt_offd++] = new_elmts[k3][k2];
break;
}
k3++;
}
}
}
}
}
if (num_procs > 1)
{
Pext_recv_size = 0;
Pext_recv_vec_start[pass][0] = 0;
cnt_offd = 0;
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_start[i]; j<recv_vec_start[i+1]; j++)
{
if (assigned_offd[j] == pass-1)
{
Pext_start[j] = cnt_offd;
cnt_offd += Pext_i[j+1];
}
}
Pext_recv_size = cnt_offd;
Pext_recv_vec_start[pass][i+1] = Pext_recv_size;
}
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
if (Pext_recv_size)
{
Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST);
new_elmts[pass-1] = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST);
}
else
{
Pext_pass[pass] = NULL;
new_elmts[pass-1] = NULL;
}
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(loc, HYPRE_MEMORY_HOST);
loc = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST);
hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST);
big_temp_pass = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (21, tmp_comm_pkg,
Pext_send_buffer, big_temp_pass);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
cnt_new = 0;
cnt_offd = 0;
/* JBS: Possible candidate for threading */
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_start[i]; j < recv_vec_start[i+1]; j++)
{
if (assigned_offd[j] == pass-1)
{
for (j1 = cnt_offd; j1 < cnt_offd+Pext_i[j+1]; j1++)
{
big_k1 = big_temp_pass[j1];
k2 = (HYPRE_Int)(big_k1 - my_first_cpt);
if (k2 > -1 && k2 < n_coarse)
{ Pext_pass[pass][j1] = -k2-1; }
else
{
not_found = 1;
k3 = 0;
while (k3 < pass-1 && not_found)
{
k2 = hypre_BigBinarySearch(new_elmts[k3], big_k1,
(new_counter[k3+1]-new_counter[k3]));
if (k2 > -1)
{
Pext_pass[pass][j1] = k2 + new_counter[k3];
not_found = 0;
}
else
{
k3++;
}
}
if (not_found)
{
new_elmts[pass-1][cnt_new] = big_k1;
loc[cnt_new++] = j1;
}
}
}
cnt_offd += Pext_i[j+1];
}
}
}
if (cnt_new)
{
hypre_BigQsortbi(new_elmts[pass-1],loc,0,cnt_new-1);
cnt = 0;
local_index = new_counter[pass-1];
Pext_pass[pass][loc[0]] = local_index;
for (i=1; i < cnt_new; i++)
{
if (new_elmts[pass-1][i] > new_elmts[pass-1][cnt])
{
new_elmts[pass-1][++cnt] = new_elmts[pass-1][i];
local_index++;
}
Pext_pass[pass][loc[i]] = local_index;
}
new_counter[pass] = local_index+1;
}
else if (num_procs > 1)
new_counter[pass] = new_counter[pass-1];
if (new_num_cols_offd < local_index+1)
{ new_num_cols_offd = local_index+1; }
pass_length = pass_pointer[pass+1] - pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd)
#endif
{
/* Thread by computing the sparsity structure for this pass only over
* each thread's range of rows. Rows are divided up evenly amongst
* the threads. The necessary thread-wise temporary arrays, like
* P_marker, are initialized and de-allocated internally to the
* parallel region. */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_length; }
else
{ thread_stop = (pass_length/num_threads)*(my_thread_num+1); }
thread_start += pass_pointer[pass];
thread_stop += pass_pointer[pass];
/* Local initializations */
cnt_nz = 0;
cnt_nz_offd = 0;
/* Ideally this initialization would be hoisted to the top of a parallel
* region opened before the loop over num_passes. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); /* marks points to see if they're counted */
for (i=0; i < n_coarse; i++)
{ P_marker[i] = -1; }
if (new_num_cols_offd == local_index+1)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < new_num_cols_offd; i++)
{ P_marker_offd[i] = -1; }
}
else if (n_coarse_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);
for (i=0; i < n_coarse_offd; i++)
{ P_marker_offd[i] = -1; }
}
/* Need some variables to store each thread's cnt_nz and cnt_nz_offd, and
* then stitch things together as in par_interp.c.
* This loop writes
* P_diag_i, P_offd_i: data parallel here, and require no special treatment
* P_diag_start, P_offd_start: not data parallel, require special treatment
*/
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
P_diag_start[i1] = cnt_nz;
P_offd_start[i1] = cnt_nz_offd;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_diag_pass[pass-1][k];
if (P_marker[k1] != i1)
{
cnt_nz++;
P_diag_i[i1+1]++;
P_marker[k1] = i1;
}
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
if (P_marker_offd[k1] != i1)
{
cnt_nz_offd++;
P_offd_i[i1+1]++;
P_marker_offd[k1] = i1;
}
}
}
}
j_start = 0;
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
if (k1 < 0)
{
if (P_marker[-k1-1] != i1)
{
cnt_nz++;
P_diag_i[i1+1]++;
P_marker[-k1-1] = i1;
}
}
else if (P_marker_offd[k1] != i1)
{
cnt_nz_offd++;
P_offd_i[i1+1]++;
P_marker_offd[k1] = i1;
}
}
}
}
}
/* Update P_diag_start, P_offd_start with cumulative
* nonzero counts over all threads */
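/* Illustration: with 3 threads counting cnt_nz = {5, 3, 7}, the in-place
 * scan below yields {5, 8, 15}; thread 1 then offsets its P_diag_start
 * entries by 5 and thread 2 by 8, while thread 0 reads the total (15)
 * to size P_diag_pass[pass]. */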
if(my_thread_num == 0)
{ max_num_threads[0] = num_threads; }
cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd;
cnt_nz_per_thread[my_thread_num] = cnt_nz;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
for(i = 1; i < max_num_threads[0]; i++)
{
cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i-1];
cnt_nz_per_thread[i] += cnt_nz_per_thread[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num > 0)
{
/* update this thread's section of P_diag_start and P_offd_start
* with the num of nz's counted by previous threads */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
P_diag_start[i1] += cnt_nz_per_thread[my_thread_num-1];
P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num-1];
}
}
else /* if my_thread_num == 0 */
{
/* Grab the nz count for all threads */
cnt_nz = cnt_nz_per_thread[max_num_threads[0]-1];
cnt_nz_offd = cnt_nz_offd_per_thread[max_num_threads[0]-1];
/* Updated total nz count */
total_nz += cnt_nz;
total_nz_offd += cnt_nz_offd;
/* Allocate P_diag_pass and P_offd_pass for all threads */
P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST);
if (cnt_nz_offd)
P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST);
else if (num_procs > 1)
P_offd_pass[pass] = NULL;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* offset cnt_nz and cnt_nz_offd to point to the starting
* point in P_diag_pass and P_offd_pass for each thread */
if(my_thread_num > 0)
{
cnt_nz = cnt_nz_per_thread[my_thread_num-1];
cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num-1];
}
else
{
cnt_nz = 0;
cnt_nz_offd = 0;
}
/* Set P_diag_pass and P_offd_pass */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_diag_pass[pass-1][k];
if (P_marker[k1] != -i1-1)
{
P_diag_pass[pass][cnt_nz++] = k1;
P_marker[k1] = -i1-1;
}
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
if (P_marker_offd[k1] != -i1-1)
{
P_offd_pass[pass][cnt_nz_offd++] = k1;
P_marker_offd[k1] = -i1-1;
}
}
}
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
if (k1 < 0)
{
if (P_marker[-k1-1] != -i1-1)
{
P_diag_pass[pass][cnt_nz++] = -k1-1;
P_marker[-k1-1] = -i1-1;
}
}
else if (P_marker_offd[k1] != -i1-1)
{
P_offd_pass[pass][cnt_nz_offd++] = k1;
P_marker_offd[k1] = -i1-1;
}
}
}
}
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if ( (n_coarse_offd) || (new_num_cols_offd == local_index+1) )
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End parallel region */
}
hypre_TFree(loc, HYPRE_MEMORY_HOST);
hypre_TFree(P_ncols, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST);
hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST);
hypre_TFree(new_recv_vec_start, HYPRE_MEMORY_HOST);
hypre_TFree(cnt_nz_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(cnt_nz_offd_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, total_nz, HYPRE_MEMORY_DEVICE);
if (total_nz_offd)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, total_nz_offd, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, total_nz_offd, HYPRE_MEMORY_DEVICE);
}
for (i=0; i < n_fine; i++)
{
P_diag_i[i+1] += P_diag_i[i];
P_offd_i[i+1] += P_offd_i[i];
}
/* determine P for coarse points */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_coarse; i++)
{
i1 = C_array[i];
P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1];
P_diag_data[P_diag_i[i1]] = 1.0;
}
if (weight_option) /*if this is set, weights are separated into
negative and positive offdiagonals and accumulated
accordingly */
{
pass_length = pass_pointer[2]-pass_pointer[1];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for pass one. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i=0; i < n_fine; i++)
{ P_marker[i] = -1; }
if (num_cols_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
P_marker_offd[i] = -1;
}
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[1] + pass_length; }
else
{ thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); }
/* determine P for points of pass 1, i.e. neighbors of coarse points */
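/* The scaling below is Stuben's direct interpolation: for an F point i and
 * a strong C neighbor j, w_ij = -(sum_{k in N_i} a_ik / sum_{k in C_i} a_ik)
 * * a_ij / a_ii, applied separately to the negative couplings (factor alfa)
 * and the positive couplings (factor beta). */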
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C_pos = 0;
sum_C_neg = 0;
sum_N_pos = 0;
sum_N_neg = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[1][j];
P_marker[C_array[k1]] = i1;
}
cnt = P_diag_i[i1];
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
{
if (A_diag_data[j] < 0)
sum_N_neg += A_diag_data[j];
else
sum_N_pos += A_diag_data[j];
}
if (j1 != -1 && P_marker[j1] == i1)
{
P_diag_data[cnt] = A_diag_data[j];
P_diag_j[cnt++] = fine_to_coarse[j1];
if (A_diag_data[j] < 0)
sum_C_neg += A_diag_data[j];
else
sum_C_pos += A_diag_data[j];
}
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[1][j];
P_marker_offd[C_array_offd[k1]] = i1;
}
cnt_offd = P_offd_i[i1];
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
j1 = A_offd_j[j];
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func_offd[j1]))
{
if (A_offd_data[j] < 0)
sum_N_neg += A_offd_data[j];
else
sum_N_pos += A_offd_data[j];
}
if (j1 != -1 && P_marker_offd[j1] == i1)
{
P_offd_data[cnt_offd] = A_offd_data[j];
P_offd_j[cnt_offd++] = map_S_to_new[j1];
if (A_offd_data[j] < 0)
sum_C_neg += A_offd_data[j];
else
sum_C_pos += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C_neg*diagonal != 0) alfa = -sum_N_neg/(sum_C_neg*diagonal);
if (sum_C_pos*diagonal != 0) beta = -sum_N_pos/(sum_C_pos*diagonal);
for (j=P_diag_i[i1]; j < cnt; j++)
if (P_diag_data[j] < 0)
P_diag_data[j] *= alfa;
else
P_diag_data[j] *= beta;
for (j=P_offd_i[i1]; j < cnt_offd; j++)
if (P_offd_data[j] < 0)
P_offd_data[j] *= alfa;
else
P_offd_data[j] *= beta;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End Parallel Region */
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
if (n_coarse) hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST);
for (pass = 2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_size = Pext_send_map_start[pass][num_sends];
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_i[j1];
j_end = P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{ Pext_send_data[cnt_offd++] = P_diag_data[k]; }
j_start = P_offd_i[j1];
j_end = P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{ Pext_send_data[cnt_offd++] = P_offd_data[k]; }
}
}
}
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
Pext_recv_size = Pext_recv_vec_start[pass][num_recvs];
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg,
Pext_send_data, Pext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST);
}
pass_length = pass_pointer[pass+1]-pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for passes >= 2. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i=0; i < n_fine; i++)
{ P_marker[i] = -1; }
if (num_cols_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
P_marker_offd[i] = -1;
}
C_array = NULL;
C_array_offd = NULL;
if (n_coarse)
{ C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); }
if (new_num_cols_offd > n_coarse_offd)
{ C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else if (n_coarse_offd)
{ C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[pass] + pass_length; }
else
{ thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); }
/* Loop over each thread's row-range */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C_neg = 0;
sum_C_pos = 0;
sum_N_neg = 0;
sum_N_pos = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
cnt = P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[pass][j];
C_array[k1] = cnt;
P_diag_data[cnt] = 0;
P_diag_j[cnt++] = k1;
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
cnt_offd = P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[pass][j];
C_array_offd[k1] = cnt_offd;
P_offd_data[cnt_offd] = 0;
P_offd_j[cnt_offd++] = k1;
}
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
P_marker[j1] = i1;
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
P_marker_offd[j1] = i1;
}
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (P_marker[j1] == i1)
{
for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++)
{
k1 = P_diag_j[k];
alfa = A_diag_data[j]*P_diag_data[k];
P_diag_data[C_array[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++)
{
k1 = P_offd_j[k];
alfa = A_diag_data[j]*P_offd_data[k];
P_offd_data[C_array_offd[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
}
else
{
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
{
if (A_diag_data[j] < 0)
sum_N_neg += A_diag_data[j];
else
sum_N_pos += A_diag_data[j];
}
}
}
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
j1 = A_offd_j[j];
if (j1 > -1 && P_marker_offd[j1] == i1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
alfa = A_offd_data[j]*Pext_data[k];
if (k1 < 0)
P_diag_data[C_array[-k1-1]] += alfa;
else
P_offd_data[C_array_offd[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
}
else
{
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func_offd[j1] == dof_func[i1]))
{
if ( A_offd_data[j] < 0)
sum_N_neg += A_offd_data[j];
else
sum_N_pos += A_offd_data[j];
}
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C_neg*diagonal != 0) alfa = -sum_N_neg/(sum_C_neg*diagonal);
if (sum_C_pos*diagonal != 0) beta = -sum_N_pos/(sum_C_pos*diagonal);
for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++)
if (P_diag_data[j] < 0)
P_diag_data[j] *= alfa;
else
P_diag_data[j] *= beta;
for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
if (P_offd_data[j] < 0)
P_offd_data[j] *= alfa;
else
P_offd_data[j] *= beta;
}
hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End OMP Parallel Section */
hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST);
}
} /* End num_passes for-loop */
}
else /* no distinction between positive and negative offdiagonal elements */
{
pass_length = pass_pointer[2]-pass_pointer[1];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for pass one. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
/* Initialize thread-wise variables */
tmp_marker = NULL;
if (n_fine)
{ tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); }
tmp_marker_offd = NULL;
if (num_cols_offd)
{ tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
for (i=0; i < n_fine; i++)
{ tmp_marker[i] = -1; }
for (i=0; i < num_cols_offd; i++)
{ tmp_marker_offd[i] = -1; }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[1] + pass_length; }
else
{ thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); }
/* determine P for points of pass 1, i.e. neighbors of coarse points */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C = 0;
sum_N = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[1][j];
tmp_marker[C_array[k1]] = i1;
}
cnt = P_diag_i[i1];
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
sum_N += A_diag_data[j];
if (j1 != -1 && tmp_marker[j1] == i1)
{
P_diag_data[cnt] = A_diag_data[j];
P_diag_j[cnt++] = fine_to_coarse[j1];
sum_C += A_diag_data[j];
}
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[1][j];
tmp_marker_offd[C_array_offd[k1]] = i1;
}
cnt_offd = P_offd_i[i1];
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
j1 = A_offd_j[j];
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func_offd[j1]))
sum_N += A_offd_data[j];
if (j1 != -1 && tmp_marker_offd[j1] == i1)
{
P_offd_data[cnt_offd] = A_offd_data[j];
P_offd_j[cnt_offd++] = map_S_to_new[j1];
sum_C += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C*diagonal != 0) alfa = -sum_N/(sum_C*diagonal);
for (j=P_diag_i[i1]; j < cnt; j++)
P_diag_data[j] *= alfa;
for (j=P_offd_i[i1]; j < cnt_offd; j++)
P_offd_data[j] *= alfa;
}
hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST);
} /* end OMP parallel region */
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
if (n_coarse) hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST);
for (pass = 2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_size = Pext_send_map_start[pass][num_sends];
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_i[j1];
j_end = P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_data[cnt_offd++] = P_diag_data[k];
}
j_start = P_offd_i[j1];
j_end = P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_data[cnt_offd++] = P_offd_data[k];
}
}
}
}
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
Pext_recv_size = Pext_recv_vec_start[pass][num_recvs];
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg,
Pext_send_data, Pext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST);
}
pass_length = pass_pointer[pass+1]-pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for passes >= 2. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
/* Initialize thread-wise variables */
tmp_marker = NULL;
if (n_fine)
{ tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); }
tmp_marker_offd = NULL;
if (num_cols_offd)
{ tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
tmp_array = NULL;
if (n_coarse)
{ tmp_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); }
tmp_array_offd = NULL;
if (new_num_cols_offd > n_coarse_offd)
{ tmp_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else
{ tmp_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);}
for (i=0; i < n_fine; i++)
{ tmp_marker[i] = -1; }
for (i=0; i < num_cols_offd; i++)
{ tmp_marker_offd[i] = -1; }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[pass] + pass_length; }
else
{ thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); }
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C = 0;
sum_N = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
cnt = P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[pass][j];
tmp_array[k1] = cnt;
P_diag_data[cnt] = 0;
P_diag_j[cnt++] = k1;
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
cnt_offd = P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[pass][j];
tmp_array_offd[k1] = cnt_offd;
P_offd_data[cnt_offd] = 0;
P_offd_j[cnt_offd++] = k1;
}
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
tmp_marker[j1] = i1;
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
tmp_marker_offd[j1] = i1;
}
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (tmp_marker[j1] == i1)
{
for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++)
{
k1 = P_diag_j[k];
alfa = A_diag_data[j]*P_diag_data[k];
P_diag_data[tmp_array[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++)
{
k1 = P_offd_j[k];
alfa = A_diag_data[j]*P_offd_data[k];
P_offd_data[tmp_array_offd[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
}
else
{
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
sum_N += A_diag_data[j];
}
}
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
j1 = A_offd_j[j];
if (j1 > -1 && tmp_marker_offd[j1] == i1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
alfa = A_offd_data[j]*Pext_data[k];
if (k1 < 0)
P_diag_data[tmp_array[-k1-1]] += alfa;
else
P_offd_data[tmp_array_offd[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
}
else
{
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func_offd[j1] == dof_func[i1]))
sum_N += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C*diagonal != 0) alfa = -sum_N/(sum_C*diagonal);
for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++)
P_diag_data[j] *= alfa;
for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
P_offd_data[j] *= alfa;
}
hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_array, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_array_offd, HYPRE_MEMORY_HOST);
} /* End OMP Parallel Section */
hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST);
}
}
}
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_map_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_pass, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_start, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_i, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(assigned, HYPRE_MEMORY_HOST);
hypre_TFree(assigned_offd, HYPRE_MEMORY_HOST);
hypre_TFree(pass_pointer, HYPRE_MEMORY_HOST);
hypre_TFree(pass_array, HYPRE_MEMORY_HOST);
hypre_TFree(map_S_to_new, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max
and/or keeping at most <P_max_elmts> absolutely largest coefficients per row */
if (trunc_factor != 0.0 || P_max_elmts != 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
}
P_offd_size = P_offd_i[n_fine];
num_cols_offd_P = 0;
if (P_offd_size)
{
if (new_num_cols_offd > num_cols_offd)
{ P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else
{ P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_num_cols_offd; i++)
{ P_marker_offd[i] = 0; }
num_cols_offd_P = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker_offd[index])
{
num_cols_offd_P++;
P_marker_offd[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P, HYPRE_MEMORY_HOST);
permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes-1], HYPRE_MEMORY_HOST);
big_permute = hypre_CTAlloc(HYPRE_BigInt, new_counter[num_passes-1], HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_counter[num_passes-1]; i++)
big_permute[i] = -1;
cnt = 0;
for (i=0; i < num_passes-1; i++)
{
for (j=new_counter[i]; j < new_counter[i+1]; j++)
{
if (P_marker_offd[j])
{
col_map_offd_P[cnt] = new_elmts[i][j-(HYPRE_BigInt)new_counter[i]];
big_permute[j] = col_map_offd_P[cnt++];
}
}
}
hypre_BigQsort0(col_map_offd_P,0,num_cols_offd_P-1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,big_k1) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_counter[num_passes-1]; i++)
{
big_k1 = big_permute[i];
if (big_k1 != -1)
permute[i] = hypre_BigBinarySearch(col_map_offd_P,big_k1,num_cols_offd_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{ P_offd_j[i] = permute[P_offd_j[i]]; }
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
for (i=0; i < num_passes-1; i++)
hypre_TFree(new_elmts[i], HYPRE_MEMORY_HOST);
}
hypre_TFree(permute, HYPRE_MEMORY_HOST);
hypre_TFree(big_permute, HYPRE_MEMORY_HOST);
hypre_TFree(new_elmts, HYPRE_MEMORY_HOST);
hypre_TFree(new_counter, HYPRE_MEMORY_HOST);
if (num_cols_offd_P)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P;
}
if (n_SF)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
}
if (num_procs > 1)
{
hypre_MatvecCommPkgCreate(P);
}
*P_ptr = P;
/* wall_time = hypre_MPI_Wtime() - wall_time;
hypre_printf("TOTAL TIME %1.2e \n",wall_time); */
/*-----------------------------------------------------------------------
* Build and return dof_func array for coarse grid.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Free mapping vector and marker array.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] += hypre_MPI_Wtime();
#endif
return(0);
}
|
test-math-vector-sincos.h | /* Wrapper definitions for tests of ABI of vector sincos/sincosf having
vector declaration "#pragma omp declare simd notinbranch".
Copyright (C) 2016-2017 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#define INIT_VEC_PTRS_LOOP(vec, val, len) \
do \
{ \
for (i = 0; i < len; i++) \
{ \
vec[i] = &val[i]; \
} \
} \
while (0)
/* Wrapper for vector sincos/sincosf compatible with x86_64 and x32 variants
of _ZGVbN2vvv_sincos, _ZGVdN4vvv_sincos, _ZGVeN8vvv_sincos;
x32 variants of _ZGVbN4vvv_sincosf, _ZGVcN4vvv_sincos, _ZGVdN8vvv_sincosf,
_ZGVeN16vvv_sincosf. */
#define VECTOR_WRAPPER_fFF_2(scalar_func, vector_func) \
extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE); \
void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \
{ \
int i; \
FLOAT r_loc[VEC_LEN], r1_loc[VEC_LEN]; \
VEC_TYPE mx; \
VEC_INT_TYPE mr, mr1; \
INIT_VEC_LOOP (mx, x, VEC_LEN); \
INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN); \
INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN); \
vector_func (mx, mr, mr1); \
TEST_VEC_LOOP (r_loc, VEC_LEN); \
TEST_VEC_LOOP (r1_loc, VEC_LEN); \
*r = r_loc[0]; \
*r1 = r1_loc[0]; \
return; \
}
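/* A typical instantiation (illustrative; the real uses live in the
per-vector-length wrapper files) is
VECTOR_WRAPPER_fFF_2 (sincos_vlen2, _ZGVbN2vvv_sincos)
which defines a scalar-signature sincos_vlen2 (x, r, r1): it broadcasts x
across the vector, calls the vector variant, checks that all lanes agree,
and writes lane 0 back through r and r1. */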
/* Wrapper for vector sincos/sincosf compatible with x86_64 variants of
_ZGVcN4vvv_sincos, _ZGVeN16vvv_sincosf, _ZGVbN4vvv_sincosf,
_ZGVdN8vvv_sincosf, _ZGVcN8vvv_sincosf. */
#define VECTOR_WRAPPER_fFF_3(scalar_func, vector_func) \
extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \
VEC_INT_TYPE, VEC_INT_TYPE); \
void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \
{ \
int i; \
FLOAT r_loc[VEC_LEN/2], r1_loc[VEC_LEN/2]; \
VEC_TYPE mx; \
VEC_INT_TYPE mr, mr1; \
INIT_VEC_LOOP (mx, x, VEC_LEN); \
INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN/2); \
INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN/2); \
vector_func (mx, mr, mr, mr1, mr1); \
TEST_VEC_LOOP (r_loc, VEC_LEN/2); \
TEST_VEC_LOOP (r1_loc, VEC_LEN/2); \
*r = r_loc[0]; \
*r1 = r1_loc[0]; \
return; \
}
/* Wrapper for vector sincosf compatible with x86_64 variant of
_ZGVcN8vvv_sincosf. */
#define VECTOR_WRAPPER_fFF_4(scalar_func, vector_func) \
extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \
VEC_INT_TYPE, VEC_INT_TYPE, \
VEC_INT_TYPE, VEC_INT_TYPE, \
VEC_INT_TYPE, VEC_INT_TYPE); \
void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \
{ \
int i; \
FLOAT r_loc[VEC_LEN/4], r1_loc[VEC_LEN/4]; \
VEC_TYPE mx; \
VEC_INT_TYPE mr, mr1; \
INIT_VEC_LOOP (mx, x, VEC_LEN); \
INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN/4); \
INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN/4); \
vector_func (mx, mr, mr, mr, mr, mr1, mr1, mr1, mr1); \
TEST_VEC_LOOP (r_loc, VEC_LEN/4); \
TEST_VEC_LOOP (r1_loc, VEC_LEN/4); \
*r = r_loc[0]; \
*r1 = r1_loc[0]; \
return; \
}
|
GB_binop__bshift_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint8)
// C=scalar+B GB (_bind1st__bshift_uint8)
// C=scalar+B' GB (_bind1st_tran__bshift_uint8)
// C=A+scalar GB (_bind2nd__bshift_uint8)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint8 (aij, bij)
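// (z = GB_bitshift_uint8 (x,y) shifts x left by y bits when y >= 0 and
// right by -y bits when y < 0, which is why B has the signed type int8_t
// while A and C are uint8_t.)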
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_uint8 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT8 || GxB_NO_BSHIFT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bshift_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bshift_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bshift_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bshift_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bshift_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_uint8 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bshift_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_bitshift_uint8 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint8 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__bshift_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint8 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__asin_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asin_fp32_fp32)
// op(A') function: GB (_unop_tran__asin_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = asinf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = asinf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = asinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__asin_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = asinf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = asinf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__asin_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
simd_utils_avx512_int32.h | /*
* Project : SIMD_Utils
* Version : 0.1.12
* Author : JishinMaster
* Licence : BSD-2
*/
#pragma once
#include <stdint.h>
#include "immintrin.h"
static inline void add512s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
int stop_len = len / AVX512_LEN_INT32;
stop_len *= AVX512_LEN_INT32;
if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX512_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_store_si512(dst + i, _mm512_add_epi32(_mm512_load_si512(src1 + i), _mm512_load_si512(src2 + i)));
}
} else {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_storeu_si512(dst + i, _mm512_add_epi32(_mm512_loadu_si512(src1 + i), _mm512_loadu_si512(src2 + i)));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = src1[i] + src2[i];
}
}
static inline void mul512s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
int stop_len = len / AVX512_LEN_INT32;
stop_len *= AVX512_LEN_INT32;
if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX512_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_store_si512(dst + i, _mm512_mullo_epi32(_mm512_load_si512(src1 + i), _mm512_load_si512(src2 + i)));
}
} else {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_storeu_si512(dst + i, _mm512_mullo_epi32(_mm512_loadu_si512(src1 + i), _mm512_loadu_si512(src2 + i)));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = src1[i] * src2[i];
}
}
static inline void sub512s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
int stop_len = len / AVX512_LEN_INT32;
stop_len *= AVX512_LEN_INT32;
if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX512_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_store_si512(dst + i, _mm512_sub_epi32(_mm512_load_si512(src1 + i), _mm512_load_si512(src2 + i)));
}
} else {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_storeu_si512(dst + i, _mm512_sub_epi32(_mm512_loadu_si512(src1 + i), _mm512_loadu_si512(src2 + i)));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = src1[i] - src2[i];
}
}
static inline void addc512s(int32_t *src, int32_t value, int32_t *dst, int len)
{
int stop_len = len / AVX512_LEN_INT32;
stop_len *= AVX512_LEN_INT32;
const v16si tmp = _mm512_set1_epi32(value);
if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX512_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_store_si512(dst + i, _mm512_add_epi32(tmp, _mm512_load_si512(src + i)));
}
} else {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_storeu_si512(dst + i, _mm512_add_epi32(tmp, _mm512_loadu_si512(src + i)));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = src[i] + value;
}
}
// Experimental
static inline void copy512s(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / AVX512_LEN_INT32;
stop_len *= AVX512_LEN_INT32;
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_store_si512((__m512i *) (dst + i), _mm512_load_si512((__m512i *) (src + i)));
}
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
static inline void copy512s_2(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / (2 * AVX512_LEN_INT32);
stop_len *= (2 * AVX512_LEN_INT32);
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += 2 * AVX512_LEN_INT32) {
__m512i tmp1 = _mm512_load_si512((__m512i *) (src + i));
__m512i tmp2 = _mm512_load_si512((__m512i *) (src + i + AVX512_LEN_INT32));
_mm512_store_si512((__m512i *) (dst + i), tmp1);
_mm512_store_si512((__m512i *) (dst + i + AVX512_LEN_INT32), tmp2);
}
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
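// The fast_copy512s* variants below use non-temporal (streaming) loads and
// stores that bypass the cache hierarchy; the _mm_mfence() at the end makes
// the weakly ordered streaming stores globally visible before returning.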
static inline void fast_copy512s(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / AVX512_LEN_INT32;
stop_len *= AVX512_LEN_INT32;
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
_mm512_stream_si512((__m512i *) (dst + i), _mm512_stream_load_si512((__m512i *) (src + i)));
}
_mm_mfence();
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
static inline void fast_copy512s_2(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / (2 * AVX512_LEN_INT32);
stop_len *= (2 * AVX512_LEN_INT32);
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += 2 * AVX512_LEN_INT32) {
__m512i tmp1 = _mm512_stream_load_si512((__m512i *) (src + i));
__m512i tmp2 = _mm512_stream_load_si512((__m512i *) (src + i + AVX512_LEN_INT32));
_mm512_stream_si512((__m512i *) (dst + i), tmp1);
_mm512_stream_si512((__m512i *) (dst + i + AVX512_LEN_INT32), tmp2);
}
_mm_mfence();
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
static inline void fast_copy512s_4(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / (4 * AVX512_LEN_INT32);
stop_len *= (4 * AVX512_LEN_INT32);
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += 4 * AVX512_LEN_INT32) {
__m512i tmp1 = _mm512_stream_load_si512((__m512i *) (src + i));
__m512i tmp2 = _mm512_stream_load_si512((__m512i *) (src + i + AVX512_LEN_INT32));
__m512i tmp3 = _mm512_stream_load_si512((__m512i *) (src + i + 2 * AVX512_LEN_INT32));
__m512i tmp4 = _mm512_stream_load_si512((__m512i *) (src + i + 3 * AVX512_LEN_INT32));
_mm512_stream_si512((__m512i *) (dst + i), tmp1);
_mm512_stream_si512((__m512i *) (dst + i + AVX512_LEN_INT32), tmp2);
_mm512_stream_si512((__m512i *) (dst + i + 2 * AVX512_LEN_INT32), tmp3);
_mm512_stream_si512((__m512i *) (dst + i + 3 * AVX512_LEN_INT32), tmp4);
}
_mm_mfence();
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
// To be improved?
static inline __m512i _mm512_absdiff_epi16(__m512i a, __m512i b)
{
__m512i cmp, difab, difba;
__m512i zero = _mm512_setzero_epi32();
__mmask32 cmp_mask = _mm512_cmpgt_epi16_mask(a,b);
cmp = _mm512_mask_set1_epi16(zero, cmp_mask, 0xFFFF);
difab = _mm512_sub_epi16(a,b);
difba = _mm512_sub_epi16 (b,a);
difab = _mm512_and_si512(cmp, difab);
difba = _mm512_andnot_si512(cmp, difba);
return _mm512_or_si512(difab, difba);
}
static inline __m512i _mm512_absdiff_epi32(__m512i a, __m512i b)
{
__m512i cmp, difab, difba;
__m512i zero = _mm512_setzero_epi32();
__mmask16 cmp_mask = _mm512_cmpgt_epi32_mask(a,b);
cmp = _mm512_mask_set1_epi32(zero, cmp_mask, 0xFFFFFFFF);
difab = _mm512_sub_epi32(a,b);
difba = _mm512_sub_epi32 (b,a);
difab = _mm512_and_si512(cmp, difab);
difba = _mm512_andnot_si512(cmp, difba);
return _mm512_or_si512(difab, difba);
}
static inline __m512i _mm512_absdiff_epi8(__m512i a, __m512i b)
{
__m512i cmp, difab, difba;
__m512i zero = _mm512_setzero_epi32();
__mmask64 cmp_mask = _mm512_cmpgt_epi8_mask(a,b);
cmp = _mm512_mask_set1_epi8(zero, cmp_mask, 0xFF);
difab = _mm512_sub_epi8(a,b);
difba = _mm512_sub_epi8 (b,a);
difab = _mm512_and_si512(cmp, difab);
difba = _mm512_andnot_si512(cmp, difba);
return _mm512_or_si512(difab, difba);
}
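/* A possible simplification (a sketch, not used above): |a - b| can also be
computed as max(a,b) - min(a,b), avoiding the explicit mask materialization.
Shown for 32-bit lanes, which only needs the AVX512F min/max intrinsics;
the epi8/epi16 variants would need their AVX512BW counterparts. */
static inline __m512i _mm512_absdiff_epi32_alt(__m512i a, __m512i b)
{
    return _mm512_sub_epi32(_mm512_max_epi32(a, b), _mm512_min_epi32(a, b));
}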
static inline void absdiff16s_512s(int16_t *src1, int16_t *src2, int16_t *dst, int len)
{
int stop_len = len / AVX512_LEN_INT16;
stop_len *= AVX512_LEN_INT16;
if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX512_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT16) {
__m512i a = _mm512_load_si512((__m512i *) (src1 + i));
__m512i b = _mm512_load_si512((__m512i *) (src2 + i));
_mm512_store_si512((__m512i *)(dst + i), _mm512_absdiff_epi16(a,b));
}
} else {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT16) {
__m512i a = _mm512_loadu_si512((__m512i *) (src1 + i));
__m512i b = _mm512_loadu_si512((__m512i *) (src2 + i));
_mm512_storeu_si512((__m512i *) (dst + i), _mm512_absdiff_epi16(a,b));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = abs(src1[i] - src2[i]);
}
}
static inline void powerspect16s_512s_interleaved(complex16s_t *src, int32_t *dst, int len)
{
int stop_len = len / AVX512_LEN_INT32;
stop_len *= AVX512_LEN_INT32;
int j = 0;
if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX512_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
__m512i reim = _mm512_load_si512((__m512i *)((const int16_t *)src + j));
// print8i(reim); printf("\n");
_mm512_store_si512((__m512i*)(dst + i), _mm512_madd_epi16 (reim, reim));
j += AVX512_LEN_INT16;
}
} else {
for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) {
__m512i reim = _mm512_loadu_si512((__m512i *)((const int16_t *)src + j));
_mm512_storeu_si512((__m512i*)(dst + i), _mm512_madd_epi16 (reim, reim));
j += AVX512_LEN_INT16;
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = (int32_t)src[i].re * (int32_t)src[i].re + (int32_t)src[i].im * (int32_t)src[i].im;
}
}
|
laplacian.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cblas.h>
#include <memory.h>
#include <omp.h>
#include "eigenmap.h"
static void diag(double *d, const double *w, int n_patch);
static void compute_l(double *w, int n_patch);
/*
 * laplacian computes the Laplacian matrix based on the weight matrix.
 *
 * w: the weight matrix (n_patch x n_patch)
 * n_patch: the dimension of w
 * Note: the Laplacian matrix is computed in-place and overwrites w.
 */
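/*
 * Concretely, this builds the symmetric normalized Laplacian
 *     L = I - D^(-1/2) * W * D^(-1/2),   D[j][j] = sum_i W[i][j],
 * where diag() below stores the entries d[j] = 1/sqrt(D[j][j]).
 */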
void laplacian(double *w, int n_patch)
{
double *d = (double *)calloc(n_patch, sizeof(double));
int i;
// Compute the diagonal scaling: d[j] = 1/sqrt(sum_i w[i + j*n_patch])
diag(d, w, n_patch);
// W <- D^(-1/2) * W * D^(-1/2)
#pragma omp parallel for private(i) shared(w, d) firstprivate(n_patch)
for (i = 0; i < n_patch; i++)
cblas_dscal(n_patch, d[i], &w[i], n_patch);
#pragma omp parallel for private(i) shared(w, d) firstprivate(n_patch)
for (i = 0; i < n_patch; i++)
cblas_dscal(n_patch, d[i], &w[i * n_patch], 1);
// L <- I - W
compute_l(w, n_patch);
free(d);
}
static void diag(double *d, const double *w, int n_patch)
{
int i, j;
double sum;
for (j = 0; j < n_patch; j++){
sum = 0;
#pragma omp parallel for reduction(+:sum) firstprivate(j, n_patch) \
shared(w) private(i)
for (i = 0; i < n_patch; i++)
sum += w[i + j * n_patch];
d[j] = 1 / sqrt(sum);
}
}
static void compute_l(double *w, int n_patch)
{
int N = n_patch * n_patch;
int i;
#pragma omp parallel for private(i) shared(w) firstprivate(n_patch, N)
for (i = 0; i < N; i++)
w[i] = ((i % (n_patch + 1) == 0) ? 1.0 : 0.0) - w[i];
}
|
gengrupos_p.c | /**********************************************************************************************************
* AC - OpenMP -- PARALLEL *
* Compile with the fun_p.c module and the -lm option *
* gengrupos_s.c *
* *
* Input: dbgen.dat file with the series information of each sample *
* dbenf.dat file with the disease information of each sample *
* Output: dbgen_s.out centroids, density, analysis *
**********************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include "defineg.h"
#include "fun.h"
float elem[MAXE][NCAR]; // Elements (samples) to process
struct lista_grupos listag[NGRUPOS]; // List of the elements in each group
float enf[MAXE][TENF]; // Diseases associated with the samples
struct analisis prob_enf[TENF]; // Analysis of the disease types
// Main program
int main (int argc, char *argv[]) {
float cent[NGRUPOS][NCAR], newcent[NGRUPOS][NCAR]; // Centroids
float densidad[NGRUPOS]; // Density of each cluster
int popul[MAXE]; // Group of each element
double additions[NGRUPOS][NCAR+1];
int i, j, nelem, grupo, num;
int fin = 0, num_ite = 0;
double discent;
FILE *fd;
struct timespec t1, t2, t3;
double tlec, tclu, tord, tden, tenf, tesc, texe;
if ((argc < 3) || (argc > 4)) {
printf("ERROR: gengrupos bd_muestras bd_enfermedades [num_elem]\n");
exit(-1);
}
printf("\n >> Ejecucion paralela\n");
// Program start time
clock_gettime(CLOCK_REALTIME, &t1);
// Read the data (samples): elem[i][j]
// Reading start time
clock_gettime(CLOCK_REALTIME, &t2);
fd = fopen(argv[1], "r");
if (fd == NULL) {
printf("Error al abrir el fichero %s\n", argv[1]);
exit(-1);
}
// 4th parameter: number of elements
fscanf(fd, "%d", &nelem);
if (argc == 4) {
nelem = atoi(argv[3]);
}
// EXPLANATION
// Scanning a file cannot be parallelized: as the for loop iterates, the nelem elements are read one
// after another. There is no way to access the file data by index, so the workload of this loop
// cannot be divided.
for (i = 0; i < nelem; i++) {
for (j = 0; j < NCAR; j++) {
fscanf(fd, "%f", &(elem[i][j]));
}
}
fclose(fd);
// Read the data (diseases): enf[i][j]
fd = fopen (argv[2], "r");
if (fd == NULL) {
printf("Error al abrir el fichero %s\n", argv[2]);
exit(-1);
}
// EXPLANATION
// As above, scanning a file cannot be parallelized: the elements are read sequentially as the loop
// iterates, and the file data cannot be accessed by index, so the workload of this loop cannot be
// divided.
for (i = 0; i < nelem; i++) {
for (j = 0; j < TENF; j++)
fscanf(fd, "%f", &(enf[i][j]));
}
fclose(fd);
// Reading end time and elapsed-time computation
clock_gettime (CLOCK_REALTIME, &t3);
tlec = (t3.tv_sec-t2.tv_sec) + (t3.tv_nsec-t2.tv_nsec)/(double)1e9;
// Clustering start time
clock_gettime (CLOCK_REALTIME, &t2);
// Generate the initial centroids randomly
srand (147);
for (i = 0; i < NGRUPOS; i++) {
for (j = 0; j < NCAR / 2; j++) {
cent[i][j] = (rand() % 10000) / 100.0;
cent[i][j + (NCAR / 2)] = cent[i][j];
}
}
// Phase 1: classify the elements and compute the new centroids
num_ite = 0; fin = 0;
while ((fin == 0) && (num_ite < MAXIT)) {
// Find the closest group
grupo_cercano (nelem, elem, cent, popul);
#pragma omp parallel num_threads(32)
{
// Compute the new centroids of the groups
// Mean of each feature
// Accumulate the values of each feature (100 of them); the element count goes in the last slot
#pragma omp for private(i, j) schedule(dynamic,2)
for (i = 0; i < NGRUPOS; i++) {
for (j = 0; j < NCAR + 1; j++) {
additions[i][j] = 0.0;
}
}
#pragma omp single
{
for (i = 0; i < nelem; i++) {
for (j = 0; j < NCAR; j++) {
additions[popul[i]][j] += elem[i][j];
}
additions[popul[i]][NCAR]++;
}
fin = 1;
}
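// EXPLANATION
// The accumulation above scatters through popul[i], so different iterations
// can update the same group; it is kept inside a single construct because
// parallelizing it would require atomics or per-thread partial sums.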
// Compute the new centroids and decide whether the process has finished (based on DELTA)
#pragma omp for private(i, j, discent) schedule(dynamic,2)
for (i = 0; i < NGRUPOS; i++) {
// This group (cluster) is not empty
if (additions[i][NCAR] > 0) {
for (j = 0; j < NCAR; j++) {
newcent[i][j] = additions[i][j] / additions[i][NCAR];
}
// Decide whether the process has finished
discent = gendist (&newcent[i][0], &cent[i][0]);
// Some centroid changed; keep iterating
if (discent > DELTA) {
fin = 0;
}
// Copy the new centroids
for (j = 0; j < NCAR; j++) {
cent[i][j] = newcent[i][j];
}
}
}
#pragma omp single
{
num_ite++;
}
}
} // while
// Clustering end time and elapsed-time computation
clock_gettime(CLOCK_REALTIME, &t3);
tclu = (t3.tv_sec-t2.tv_sec) + (t3.tv_nsec-t2.tv_nsec)/(double)1e9;
// Phase 2: number of elements in each group; density; disease analysis
// Sorting start time
clock_gettime(CLOCK_REALTIME, &t2);
#pragma omp parallel for private(i) schedule(static,2) num_threads(8)
for (i = 0; i < NGRUPOS; i++) {
listag[i].nelemg = 0;
}
// Number of elements and their classification
// EXPLANATION
// Because of the successive dependences between all the elements of the loop, it cannot be
// parallelized without introducing several critical sections.
for (i = 0; i < nelem; i++) {
grupo = popul[i];
num = listag[grupo].nelemg;
listag[grupo].elemg[num] = i; // Elements of each group (cluster)
listag[grupo].nelemg++;
}
// Sorting end time and elapsed-time computation
clock_gettime(CLOCK_REALTIME, &t3);
tord = (t3.tv_sec-t2.tv_sec) + (t3.tv_nsec-t2.tv_nsec)/(double)1e9;
// Density of each cluster: mean of the distances between all of its elements
// Density start time
clock_gettime(CLOCK_REALTIME, &t2);
calcular_densidad (elem, listag, densidad);
// Density end time and elapsed-time computation
clock_gettime(CLOCK_REALTIME, &t3);
tden = (t3.tv_sec-t2.tv_sec) + (t3.tv_nsec-t2.tv_nsec)/(double)1e9;
// Disease analysis
// Diseases start time
clock_gettime(CLOCK_REALTIME, &t2);
analizar_enfermedades (listag, enf, prob_enf);
// Diseases end time and elapsed-time computation
clock_gettime(CLOCK_REALTIME, &t3);
tenf = (t3.tv_sec-t2.tv_sec) + (t3.tv_nsec-t2.tv_nsec)/(double)1e9;
// Write the results to the output file
// Writing start time
clock_gettime(CLOCK_REALTIME, &t2);
fd = fopen ("dbgen_p.out", "w");
if (fd == NULL) {
printf ("Error al abrir el fichero dbgen_p.out\n");
exit (-1);
}
fprintf (fd,">> Centroides de los clusters\n\n");
for (i=0; i<NGRUPOS; i++) {
for (j=0; j<NCAR; j++) fprintf (fd, "%7.3f", cent[i][j]);
fprintf (fd,"\n");
}
fprintf (fd,"\n\n>> Numero de elementos de cada cluster y densidad del cluster\n\n");
for (i=0; i<NGRUPOS; i++) {
fprintf(fd, " %6d %.3f \n", listag[i].nelemg, densidad[i]);
}
fprintf (fd,"\n\n>> Analisis de enfermedades en los grupos\n\n");
for (i=0; i<TENF; i++) {
fprintf(fd, "Enfermedad: %2d - max: %4.2f (grupo %2d) - min: %4.2f (grupo %2d)\n",
i, prob_enf[i].max, prob_enf[i].gmax, prob_enf[i].min, prob_enf[i].gmin);
}
fclose (fd);
// Writing end time, total time and elapsed-time computations
clock_gettime(CLOCK_REALTIME, &t3);
tesc = (t3.tv_sec-t2.tv_sec) + (t3.tv_nsec-t2.tv_nsec)/(double)1e9;
texe = (t3.tv_sec-t1.tv_sec) + (t3.tv_nsec-t1.tv_nsec)/(double)1e9;
// Print some results on screen
printf ("\n>> Centroides 0, 40 y 80, y su valor de densidad\n ");
for (i = 0; i < NGRUPOS; i+=40) {
printf("\n cent%2d -- ", i);
for (j = 0; j < NCAR; j++) {
printf("%5.1f", cent[i][j]);
}
printf("\n %5.6f\n", densidad[i]);
}
printf("\n>> Tamano de los grupos \n");
for (i = 0; i < 10; i++) {
for (j = 0; j < 10; j++) {
printf("%7d", listag[10*i+j].nelemg);
}
printf("\n");
}
printf ("\n>> Analisis de enfermedades en los grupos\n");
for (i = 0; i < TENF; i++) {
printf("Enfermedad: %2d - max: %4.2f (grupo %2d) - min: %4.2f (grupo %2d)\n",
i, prob_enf[i].max, prob_enf[i].gmax, prob_enf[i].min, prob_enf[i].gmin);
}
printf ("\n >> Numero de iteraciones: %d\n", num_ite);
printf ("\n >> Tiempos de ejecución: ");
printf ("\n - Lectura: %11.3f s", tlec);
printf ("\n - Clustering: %8.3f s", tclu);
printf ("\n - Ordenación: %8.3f s", tord);
printf ("\n - Densidad: %10.3f s", tden);
printf ("\n - Enfermedades: %6.3f s", tenf);
printf ("\n - Escritura: %9.3f s", tesc);
printf ("\n - Total: %13.3f s\n\n", texe);
return 0;
}
|
vmul.c | #include <stdio.h>
#include "assert.h"
#include <unistd.h>
#pragma omp declare target
void vmul(int*a, int*b, int*c, int N);
#pragma omp end declare target
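// "declare target" makes the compiler emit a device version of vmul as well,
// so it can be called from inside the "omp target" region below.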
int main(){
const int N = 100000;
int a[N],b[N],c[N],validate[N];
int flag=-1; // -1 marks success
for(int i=0;i<N;i++) {
a[i]=i+1;
b[i]=i+2;
validate[i]=a[i]*b[i];
}
#pragma omp target
{
vmul(a,b,c,N);
}
for(int i=0;i<N;i++) {
if(c[i]!=validate[i]) {
// print 1st bad index
if( flag == -1 )
printf("First fail: c[%d](%d) != validate[%d](%d)\n",i,c[i],i,validate[i]);
flag = i;
}
}
if( flag == -1 ){
printf("Success\n");
return 0;
} else {
printf("Last fail: c[%d](%d) != validate[%d](%d)\n",flag,c[flag],flag,validate[flag]);
printf("Fail\n");
return 1;
}
}
|
single_private.c | #include <stdio.h>
#include "omp_testsuite.h"
int
check_single_private (FILE * logFile)
{
int nr_threads_in_single = 0;
int result = 0;
int myresult = 0;
int myit = 0;
int nr_iterations = 0;
int i;
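/* The single construct privatizes nr_threads_in_single, so its body only
updates a private copy; the shared variable read back in the critical
section must therefore still be 0 (hence result == 0 on success), while
nr_iterations counts how many times the single body actually ran. */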
#pragma omp parallel private(i,myresult,myit)
{
myresult = 0;
myit = 0;
for (i = 0; i < LOOPCOUNT; i++)
{
#pragma omp single private(nr_threads_in_single) nowait
{
nr_threads_in_single = 0;
#pragma omp flush
nr_threads_in_single++;
#pragma omp flush
myit++;
/* nr_threads_in_single--; */
myresult = myresult + nr_threads_in_single;
} /* end of single */
} /* end of for */
#pragma omp critical
{
/* result += myresult; */
result += nr_threads_in_single;
nr_iterations += myit;
}
} /* end of parallel */
return (result == 0) && (nr_iterations == LOOPCOUNT);
} /* end of check_single_private */
int
crosscheck_single_private (FILE * logFile)
{
int nr_threads_in_single = 0;
int result = 0;
int myresult = 0;
int myit = 0;
int nr_iterations = 0;
int i;
#pragma omp parallel private(i,myresult,myit)
{
myresult = 0;
myit = 0;
for (i = 0; i < LOOPCOUNT; i++)
{
#pragma omp single nowait
{
nr_threads_in_single = 0;
#pragma omp flush
nr_threads_in_single++;
#pragma omp flush
myit++;
/* nr_threads_in_single--; */
myresult = myresult + nr_threads_in_single;
} /* end of single */
} /* end of for */
#pragma omp critical
{
result += nr_threads_in_single;
nr_iterations += myit;
}
} /* end of parallel */
return (result == 0) && (nr_iterations == LOOPCOUNT);
} /* end of crosscheck_single_private */
|
gridify-4.c | #define THE_LOOP \
for (i = j + 1; i < n; i += 3) \
a[i] = i
void __attribute__((noinline, noclone))
foo (int j, int n, int *a)
{
#pragma omp parallel
{
#pragma omp single
{
int i;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for shared(a) firstprivate(n) private(i) firstprivate(j)
THE_LOOP;
}
}
}
void __attribute__((noinline, noclone))
bar (int j, int n, int *a)
{
int i;
THE_LOOP;
}
int main (int argc, char **argv)
{
int n = 32;
int *a = __builtin_malloc (sizeof (int) * n);
int *ref = __builtin_malloc (sizeof (int) * n);
int i, j = 4;
__builtin_memset (a, 0, sizeof (int) * n);
__builtin_memset (ref, 0, sizeof (int) * n);
bar (j, n, ref);
foo (j, n, a);
for (i = 0; i < n; i ++)
{
if (a[i] != ref[i])
__builtin_abort ();
}
return 0;
}
|
diagsm_x_bsr_u_row.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
const ALPHA_INT num_thread = alpha_get_thread_num();
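// Unit-diagonal solve (the _u variant): with an implicit identity diagonal
// the diagonal solve reduces to the elementwise scaling y = alpha * x, so
// none of A's stored values need to be read.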
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT r = 0; r < A->rows * A->block_size; ++r)
{
for (ALPHA_INT c = 0; c < columns; ++c)
{
alpha_mul(y[index2(r, c, ldy)] , alpha , x[index2(r, c, ldx)]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
prop3DAcoTTIDenQ_DEO2_FDTD.h | #ifndef PROP3DACOTTIDENQ_DEO2_FDTD_H
#define PROP3DACOTTIDENQ_DEO2_FDTD_H
#include <omp.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <fftw3.h>
#include <complex>
#define MIN(x,y) ((x)<(y)?(x):(y))
class Prop3DAcoTTIDenQ_DEO2_FDTD {
public:
const bool _freeSurface;
const long _nbx, _nby, _nbz, _nthread, _nx, _ny, _nz, _nsponge;
const float _dx, _dy, _dz, _dt;
const float _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz;
const float _fDefault = 0.85f;
float * __restrict__ _v = NULL;
float * __restrict__ _eps = NULL;
float * __restrict__ _eta = NULL;
float * __restrict__ _b = NULL;
float * __restrict__ _sinTheta = NULL;
float * __restrict__ _cosTheta = NULL;
float * __restrict__ _sinPhi = NULL;
float * __restrict__ _cosPhi = NULL;
float * __restrict__ _f = NULL;
float * __restrict__ _dtOmegaInvQ = NULL;
float * __restrict__ _pSpace = NULL;
float * __restrict__ _mSpace = NULL;
float * __restrict__ _tmpPg1a = NULL;
float * __restrict__ _tmpPg2a = NULL;
float * __restrict__ _tmpPg3a = NULL;
float * __restrict__ _tmpMg1a = NULL;
float * __restrict__ _tmpMg2a = NULL;
float * __restrict__ _tmpMg3a = NULL;
float * __restrict__ _tmpPg1b = NULL;
float * __restrict__ _tmpPg2b = NULL;
float * __restrict__ _tmpPg3b = NULL;
float * __restrict__ _tmpMg1b = NULL;
float * __restrict__ _tmpMg2b = NULL;
float * __restrict__ _tmpMg3b = NULL;
float * _pOld = NULL;
float * _pCur = NULL;
float * _mOld = NULL;
float * _mCur = NULL;
Prop3DAcoTTIDenQ_DEO2_FDTD(
bool freeSurface,
long nthread,
long nx,
long ny,
long nz,
long nsponge,
float dx,
float dy,
float dz,
float dt,
const long nbx,
const long nby,
const long nbz) :
_freeSurface(freeSurface),
_nbx(nbx),
_nby(nby),
_nbz(nbz),
_nthread(nthread),
_nx(nx),
_ny(ny),
_nz(nz),
_nsponge(nsponge),
_dx(dx),
_dy(dy),
_dz(dz),
_dt(dt),
_c8_1(+1225.0 / 1024.0),
_c8_2(-245.0 / 3072.0),
_c8_3(+49.0 / 5120.0),
_c8_4(-5.0 / 7168.0),
_invDx(1.0 / _dx),
_invDy(1.0 / _dy),
_invDz(1.0 / _dz) {
// Allocate arrays
_v = new float[_nx * _ny * _nz];
_eps = new float[_nx * _ny * _nz];
_eta = new float[_nx * _ny * _nz];
_b = new float[_nx * _ny * _nz];
_sinTheta = new float[_nx * _ny * _nz];
_cosTheta = new float[_nx * _ny * _nz];
_sinPhi = new float[_nx * _ny * _nz];
_cosPhi = new float[_nx * _ny * _nz];
_f = new float[_nx * _ny * _nz];
_dtOmegaInvQ = new float[_nx * _ny * _nz];
_pSpace = new float[_nx * _ny * _nz];
_mSpace = new float[_nx * _ny * _nz];
_tmpPg1a = new float[_nx * _ny * _nz];
_tmpPg2a = new float[_nx * _ny * _nz];
_tmpPg3a = new float[_nx * _ny * _nz];
_tmpMg1a = new float[_nx * _ny * _nz];
_tmpMg2a = new float[_nx * _ny * _nz];
_tmpMg3a = new float[_nx * _ny * _nz];
_tmpPg1b = new float[_nx * _ny * _nz];
_tmpPg2b = new float[_nx * _ny * _nz];
_tmpPg3b = new float[_nx * _ny * _nz];
_tmpMg1b = new float[_nx * _ny * _nz];
_tmpMg2b = new float[_nx * _ny * _nz];
_tmpMg3b = new float[_nx * _ny * _nz];
_pOld = new float[_nx * _ny * _nz];
_pCur = new float[_nx * _ny * _nz];
_mOld = new float[_nx * _ny * _nz];
_mCur = new float[_nx * _ny * _nz];
numaFirstTouch(_nx, _ny, _nz, _nthread, _v, _eps, _eta, _b,
_sinTheta, _cosTheta, _sinPhi, _cosPhi, _f, _dtOmegaInvQ, _pSpace, _mSpace,
_tmpPg1a, _tmpPg2a, _tmpPg3a, _tmpMg1a, _tmpMg2a, _tmpMg3a,
_tmpPg1b, _tmpPg2b, _tmpPg3b, _tmpMg1b, _tmpMg2b, _tmpMg3b,
_pOld, _pCur, _mOld, _mCur, _nbx, _nby, _nbz);
}
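// NUMA first-touch note: on Linux, a memory page is physically allocated on
// the NUMA node of the thread that first writes it. numaFirstTouch() below
// initializes every array with the same blocked, threaded loop structure as
// the compute kernels, so pages end up local to the threads that will later
// read and write them.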
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void numaFirstTouch(
const long nx,
const long ny,
const long nz,
const long nthread,
float * __restrict__ v,
float * __restrict__ eps,
float * __restrict__ eta,
float * __restrict__ b,
float * __restrict__ sinTheta,
float * __restrict__ cosTheta,
float * __restrict__ sinPhi,
float * __restrict__ cosPhi,
float * __restrict__ f,
float * __restrict__ dtOmegaInvQ,
float * __restrict__ pSpace,
float * __restrict__ mSpace,
float * __restrict__ tmpPg1a,
float * __restrict__ tmpPg2a,
float * __restrict__ tmpPg3a,
float * __restrict__ tmpMg1a,
float * __restrict__ tmpMg2a,
float * __restrict__ tmpMg3a,
float * __restrict__ tmpPg1b,
float * __restrict__ tmpPg2b,
float * __restrict__ tmpPg3b,
float * __restrict__ tmpMg1b,
float * __restrict__ tmpMg2b,
float * __restrict__ tmpMg3b,
float * __restrict__ pOld,
float * __restrict__ pCur,
float * __restrict__ mOld,
float * __restrict__ mCur,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * ny * nz + ky * nz + kz;
v[k] = 0;
eps[k] = 0;
eta[k] = 0;
b[k] = 0;
sinTheta[k] = 0;
cosTheta[k] = 0;
sinPhi[k] = 0;
cosPhi[k] = 0;
f[k] = 0;
dtOmegaInvQ[k] = 0;
pSpace[k] = 0;
mSpace[k] = 0;
tmpPg1a[k] = 0;
tmpPg2a[k] = 0;
tmpPg3a[k] = 0;
tmpMg1a[k] = 0;
tmpMg2a[k] = 0;
tmpMg3a[k] = 0;
tmpPg1b[k] = 0;
tmpPg2b[k] = 0;
tmpPg3b[k] = 0;
tmpMg1b[k] = 0;
tmpMg2b[k] = 0;
tmpMg3b[k] = 0;
pOld[k] = 0;
pCur[k] = 0;
mOld[k] = 0;
mCur[k] = 0;
}
}
}
}
}
}
// zero the 4-cell-deep annulus on the six faces of the domain
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long kindex1 = kx * ny * nz + ky * nz + k;
const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
v[kindex1] = eps[kindex1] = eta[kindex1] = b[kindex1] = sinTheta[kindex1] =
cosTheta[kindex1] = sinPhi[kindex1] = cosPhi[kindex1] = f[kindex1] =
dtOmegaInvQ[kindex1] = pSpace[kindex1] = mSpace[kindex1] = tmpPg1a[kindex1] =
tmpPg2a[kindex1] = tmpPg3a[kindex1] = tmpMg1a[kindex1] = tmpMg2a[kindex1] =
tmpMg3a[kindex1] = tmpPg1b[kindex1] = tmpPg2b[kindex1] = tmpPg3b[kindex1] =
tmpMg1b[kindex1] = tmpMg2b[kindex1] = tmpMg3b[kindex1] = pOld[kindex1] =
pCur[kindex1] = mOld[kindex1] = mCur[kindex1] = 0;
v[kindex2] = eps[kindex2] = eta[kindex2] = b[kindex2] = sinTheta[kindex2] =
cosTheta[kindex2] = sinPhi[kindex2] = cosPhi[kindex2] = f[kindex2] =
dtOmegaInvQ[kindex2] = pSpace[kindex2] = mSpace[kindex2] = tmpPg1a[kindex2] =
tmpPg2a[kindex2] = tmpPg3a[kindex2] = tmpMg1a[kindex2] = tmpMg2a[kindex2] =
tmpMg3a[kindex2] = tmpPg1b[kindex2] = tmpPg2b[kindex2] = tmpPg3b[kindex2] =
tmpMg1b[kindex2] = tmpMg2b[kindex2] = tmpMg3b[kindex2] = pOld[kindex2] =
pCur[kindex2] = mOld[kindex2] = mCur[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = kx * ny * nz + k * nz + kz;
const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
v[kindex1] = eps[kindex1] = eta[kindex1] = b[kindex1] = sinTheta[kindex1] =
cosTheta[kindex1] = sinPhi[kindex1] = cosPhi[kindex1] = f[kindex1] =
dtOmegaInvQ[kindex1] = pSpace[kindex1] = mSpace[kindex1] = tmpPg1a[kindex1] =
tmpPg2a[kindex1] = tmpPg3a[kindex1] = tmpMg1a[kindex1] = tmpMg2a[kindex1] =
tmpMg3a[kindex1] = tmpPg1b[kindex1] = tmpPg2b[kindex1] = tmpPg3b[kindex1] =
tmpMg1b[kindex1] = tmpMg2b[kindex1] = tmpMg3b[kindex1] = pOld[kindex1] =
pCur[kindex1] = mOld[kindex1] = mCur[kindex1] = 0;
v[kindex2] = eps[kindex2] = eta[kindex2] = b[kindex2] = sinTheta[kindex2] =
cosTheta[kindex2] = sinPhi[kindex2] = cosPhi[kindex2] = f[kindex2] =
dtOmegaInvQ[kindex2] = pSpace[kindex2] = mSpace[kindex2] = tmpPg1a[kindex2] =
tmpPg2a[kindex2] = tmpPg3a[kindex2] = tmpMg1a[kindex2] = tmpMg2a[kindex2] =
tmpMg3a[kindex2] = tmpPg1b[kindex2] = tmpPg2b[kindex2] = tmpPg3b[kindex2] =
tmpMg1b[kindex2] = tmpMg2b[kindex2] = tmpMg3b[kindex2] = pOld[kindex2] =
pCur[kindex2] = mOld[kindex2] = mCur[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = k * ny * nz + ky * nz + kz;
const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
v[kindex1] = eps[kindex1] = eta[kindex1] = b[kindex1] = sinTheta[kindex1] =
cosTheta[kindex1] = sinPhi[kindex1] = cosPhi[kindex1] = f[kindex1] =
dtOmegaInvQ[kindex1] = pSpace[kindex1] = mSpace[kindex1] = tmpPg1a[kindex1] =
tmpPg2a[kindex1] = tmpPg3a[kindex1] = tmpMg1a[kindex1] = tmpMg2a[kindex1] =
tmpMg3a[kindex1] = tmpPg1b[kindex1] = tmpPg2b[kindex1] = tmpPg3b[kindex1] =
tmpMg1b[kindex1] = tmpMg2b[kindex1] = tmpMg3b[kindex1] = pOld[kindex1] =
pCur[kindex1] = mOld[kindex1] = mCur[kindex1] = 0;
v[kindex2] = eps[kindex2] = eta[kindex2] = b[kindex2] = sinTheta[kindex2] =
cosTheta[kindex2] = sinPhi[kindex2] = cosPhi[kindex2] = f[kindex2] =
dtOmegaInvQ[kindex2] = pSpace[kindex2] = mSpace[kindex2] = tmpPg1a[kindex2] =
tmpPg2a[kindex2] = tmpPg3a[kindex2] = tmpMg1a[kindex2] = tmpMg2a[kindex2] =
tmpMg3a[kindex2] = tmpPg1b[kindex2] = tmpPg2b[kindex2] = tmpPg3b[kindex2] =
tmpMg1b[kindex2] = tmpMg2b[kindex2] = tmpMg3b[kindex2] = pOld[kindex2] =
pCur[kindex2] = mOld[kindex2] = mCur[kindex2] = 0;
}
}
}
}
~Prop3DAcoTTIDenQ_DEO2_FDTD() {
if (_v != NULL) delete [] _v;
if (_eps != NULL) delete [] _eps;
if (_eta != NULL) delete [] _eta;
if (_sinTheta != NULL) delete [] _sinTheta;
if (_cosTheta != NULL) delete [] _cosTheta;
if (_sinPhi != NULL) delete [] _sinPhi;
if (_cosPhi != NULL) delete [] _cosPhi;
if (_b != NULL) delete [] _b;
if (_f != NULL) delete [] _f;
if (_dtOmegaInvQ != NULL) delete [] _dtOmegaInvQ;
if (_pSpace != NULL) delete [] _pSpace;
if (_mSpace != NULL) delete [] _mSpace;
if (_tmpPg1a != NULL) delete [] _tmpPg1a;
if (_tmpPg2a != NULL) delete [] _tmpPg2a;
if (_tmpPg3a != NULL) delete [] _tmpPg3a;
if (_tmpMg1a != NULL) delete [] _tmpMg1a;
if (_tmpMg2a != NULL) delete [] _tmpMg2a;
if (_tmpMg3a != NULL) delete [] _tmpMg3a;
if (_tmpPg1b != NULL) delete [] _tmpPg1b;
if (_tmpPg2b != NULL) delete [] _tmpPg2b;
if (_tmpPg3b != NULL) delete [] _tmpPg3b;
if (_tmpMg1b != NULL) delete [] _tmpMg1b;
if (_tmpMg2b != NULL) delete [] _tmpMg2b;
if (_tmpMg3b != NULL) delete [] _tmpMg3b;
if (_pOld != NULL) delete [] _pOld;
if (_pCur != NULL) delete [] _pCur;
if (_mOld != NULL) delete [] _mOld;
if (_mCur != NULL) delete [] _mCur;
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
void info() {
printf("\n");
printf("Prop3DAcoTTIDenQ_DEO2_FDTD\n");
printf(" nx,ny,nz; %5ld %5ld %5ld\n", _nx, _ny, _nz);
printf(" nthread,nsponge,fs; %5ld %5ld %5d\n", _nthread, _nsponge, _freeSurface);
printf(" X min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dx * (_nx - 1), _dx);
printf(" Y min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dy * (_ny - 1), _dy);
printf(" Z min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dz * (_nz - 1), _dz);
}
/**
* Notes
* - User must have called setupDtOmegaInvQ_2D to initialize the array _dtOmegaInvQ
* - wavefield arrays are switched in this call
* pCur -> pOld
* pOld -> pCur
* mCur -> mOld
* mOld -> mCur
* 2018.07.26
* - Ken's advice results in 6 derivatives per state variable instead of 11
* - Refactoring from [T D- R-] [S R+ D+] to [T D-] [R- S R+ D+]
* T 2nd order time update
* D+ forward staggered spatial derivative
* D- backward staggered spatial derivative
* S material parameter sandwich terms
* R+ forward rotation
* R- backward rotation
*/
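// Schematically, the second pass applies, per grid point,
//   pOld <- 2*pCur - pOld + dt^2*(v^2/b)*div(S grad pCur) - dtOmegaInvQ*(pCur - pOld)
// (and likewise for m); the pointer swap below then promotes the freshly
// written pOld/mOld arrays to pCur/mCur for the next step.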
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void timeStep() {
applyRotationSandwichRotation_TTI_FirstDerivatives3D_PlusHalf_TwoFields(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_pCur, _mCur, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _eps, _eta, _f, _b,
_tmpPg1a, _tmpPg2a, _tmpPg3a, _tmpMg1a, _tmpMg2a, _tmpMg3a, _nbx, _nby, _nbz);
applyFirstDerivatives3D_MinusHalf_TimeUpdate_Nonlinear(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, _dt,
_tmpPg1a, _tmpPg2a, _tmpPg3a, _tmpMg1a, _tmpMg2a, _tmpMg3a, _v, _b, _dtOmegaInvQ,
_pCur, _mCur, _pSpace, _mSpace, _pOld, _mOld, _nbx, _nby, _nbz);
// swap pointers
float *pswap = _pOld;
_pOld = _pCur;
_pCur = pswap;
float *mswap = _mOld;
_mOld = _mCur;
_mCur = mswap;
}
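// Illustrative usage -- a minimal sketch, not part of this header; the
// injectSource() helper is hypothetical:
//
//   Prop3DAcoTTIDenQ_DEO2_FDTD prop(false, nthread, nx, ny, nz, nsponge,
//                                   dx, dy, dz, dt, nbx, nby, nbz);
//   // ... fill _v, _eps, _eta, _b, the angle arrays, _f, _dtOmegaInvQ ...
//   for (long it = 0; it < nt; it++) {
//       injectSource(prop._pCur, prop._mCur, it);  // hypothetical helper
//       prop.timeStep();
//   }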
/**
* Scale the stored spatial derivatives by v^2/b to convert them into temporal derivatives
*/
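// A minimal reading, assuming the Q term is neglected: after this call
// pSpace holds (v^2/b) * div(S grad p), which by the governing equation
// approximates the second time derivative p_tt.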
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void scaleSpatialDerivatives() {
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float v2OverB = _v[k] * _v[k] / _b[k];
_pSpace[k] *= v2OverB;
_mSpace[k] *= v2OverB;
}
}
}
}
}
}
}
/**
* Add the Born source at the current time
*
* User must have:
* - called the nonlinear forward
* - saved the 2nd time derivative of pressure at the corresponding time index
*   in the wavefieldDP / wavefieldDM arrays
*
* The Born source term is injected into the _pCur and _mCur arrays.
*/
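// Composition of the scale factor used below (from the in-loop comments):
//   (V^2/B) * dt^2 * (2*B*dV/V^3) = 2*dt^2*dV/V.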
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void forwardBornInjection_V(float *dVel, float *wavefieldDP, float *wavefieldDM) {
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float dV = dVel[k];
// V^2/b factor to "clear" the b/V^2 factor on L_tP and L_tM
// _dt^2 factor is from the finite difference approximation
// 2B_dV/V^3 factor is from the linearization
const float factor = 2 * _dt * _dt * dV / V;
_pCur[k] += factor * wavefieldDP[k];
_mCur[k] += factor * wavefieldDM[k];
}
}
}
}
}
}
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void forwardBornInjection_VEA(float *dVel, float *dEps, float *dEta,
float *wavefieldP, float *wavefieldM, float *wavefieldDP, float *wavefieldDM) {
// Right side spatial derivatives for the Born source
applyFirstDerivatives3D_TTI_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
wavefieldP, wavefieldP, wavefieldP, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpPg1a, _tmpPg2a, _tmpPg3a, _nbx, _nby, _nbz);
applyFirstDerivatives3D_TTI_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
wavefieldM, wavefieldM, wavefieldM, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpMg1a, _tmpMg2a, _tmpMg3a, _nbx, _nby, _nbz);
// Sandwich terms for the Born source
// note flipped sign for Z derivative term between P and M
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float V = _v[k];
const float E = _eps[k];
const float A = _eta[k];
const float B = _b[k];
const float F = _f[k];
const float dV = dVel[k];
const float dE = dEps[k];
const float dA = dEta[k];
_tmpPg1b[k] = (+2 * B * dE) *_tmpPg1a[k];
_tmpPg2b[k] = (+2 * B * dE) *_tmpPg2a[k];
_tmpPg3b[k] = (-2 * B * F * A * dA) *_tmpPg3a[k] +
(dA * B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpMg3a[k];
_tmpMg1b[k] = 0;
_tmpMg2b[k] = 0;
_tmpMg3b[k] = (+2 * B * F * A * dA) *_tmpMg3a[k] +
(dA * B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpPg3a[k];
}
}
}
}
}
}
// Left side spatial derivatives for the Born source
applyFirstDerivatives3D_TTI_MinusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_tmpPg1b, _tmpPg2b, _tmpPg3b, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpPg1a, _tmpPg2a, _tmpPg3a, _nbx, _nby, _nbz);
applyFirstDerivatives3D_TTI_MinusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_tmpMg1b, _tmpMg2b, _tmpMg3b, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpMg1a, _tmpMg2a, _tmpMg3a, _nbx, _nby, _nbz);
// add the born source at the current time
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float dV = dVel[k];
const float dt2v2OverB = _dt * _dt * V * V / B;
const float factor = 2 * B * dV / (V * V * V);
_pCur[k] += dt2v2OverB * (factor * wavefieldDP[k] + _tmpPg1a[k] + _tmpPg2a[k] + _tmpPg3a[k]);
_mCur[k] += dt2v2OverB * (factor * wavefieldDM[k] + _tmpMg1a[k] + _tmpMg2a[k] + _tmpMg3a[k]);
}
}
}
}
}
}
}
/**
* Accumulate the Born image term at the current time
*
* User must have:
* - called the nonlinear forward
* - saved the 2nd time derivative of pressure at the corresponding time index
*   in the wavefieldDP / wavefieldDM arrays
*
* The Born image term is accumulated in the dVel array.
*/
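// The accumulation below is the adjoint of the Born source above:
//   dVel[k] += (2*B/V^3) * (wavefieldDP*pOld + wavefieldDM*mOld)
// i.e. a zero-lag correlation of the saved source-side second time
// derivatives with the receiver-side adjoint fields.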
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_V(float *dVel,
float *wavefieldDP, float *wavefieldDM) {
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float factor = 2 * B / (V * V * V);
dVel[k] += factor * (wavefieldDP[k] * _pOld[k] + wavefieldDM[k] * _mOld[k]);
}
}
}
}
}
}
}
/**
* Apply Kz wavenumber filter for up/down wavefield separation
* Faqi, 2011, Geophysics https://library.seg.org/doi/full/10.1190/1.3533914
*
* We handle the FWI and RTM imaging conditions with a condition inside the OMP loop
*
* Example Kz filtering with 8 samples
* frequency | +0 | +1 | +2 | +3 | N | -3 | -2 | -1 |
* original | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* upgoing | 0 | X | X | X | 4 | 5 | 6 | 7 |
* dngoing | 0 | 1 | 2 | 3 | 4 | X | X | X |
*/
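// Worked example for nz = 4 (nfft = 8): the masking loop below runs
// k = 1..3, so the nonlinear fields have bins 5..7 zeroed, while the
// adjoint fields have bins 1..3 zeroed for FWI (kfft_adj = 0) or bins
// 5..7 for RTM (kfft_adj = 4); DC (bin 0) and Nyquist (bin 4) survive.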
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_wavefieldsep_V(float *dVel,
float *wavefieldDP, float *wavefieldDM, const long isFWI) {
const long nfft = 2 * _nz;
const float scale = 1.0f / (float)(nfft);
// FWI: adj wavefield is dngoing
// RTM: adj wavefield is upgoing
const long kfft_adj = (isFWI) ? 0 : nfft / 2;
std::complex<float> * __restrict__ tmp = new std::complex<float>[nfft];
fftwf_plan planForward = fftwf_plan_dft_1d(nfft,
reinterpret_cast<fftwf_complex*>(tmp),
reinterpret_cast<fftwf_complex*>(tmp), +1, FFTW_ESTIMATE);
fftwf_plan planInverse = fftwf_plan_dft_1d(nfft,
reinterpret_cast<fftwf_complex*>(tmp),
reinterpret_cast<fftwf_complex*>(tmp), -1, FFTW_ESTIMATE);
delete [] tmp;
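// Planning with FFTW_ESTIMATE does not touch the scratch buffer, so it can
// be freed immediately; fftwf_execute_dft() is FFTW's thread-safe
// "new-array" execute, which lets each thread below reuse these shared
// plans with its own private buffers.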
#pragma omp parallel num_threads(_nthread)
{
std::complex<float> * __restrict__ tmp_nlf_p = new std::complex<float>[nfft];
std::complex<float> * __restrict__ tmp_adj_p = new std::complex<float>[nfft];
std::complex<float> * __restrict__ tmp_nlf_m = new std::complex<float>[nfft];
std::complex<float> * __restrict__ tmp_adj_m = new std::complex<float>[nfft];
#pragma omp for collapse(2) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kfft = 0; kfft < nfft; kfft++) {
tmp_nlf_p[kfft] = 0;
tmp_adj_p[kfft] = 0;
tmp_nlf_m[kfft] = 0;
tmp_adj_m[kfft] = 0;
}
#pragma omp simd
for (long kz = 0; kz < _nz; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
tmp_nlf_p[kz] = scale * wavefieldDP[k];
tmp_adj_p[kz] = scale * _pOld[k];
tmp_nlf_m[kz] = scale * wavefieldDM[k];
tmp_adj_m[kz] = scale * _mOld[k];
}
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_nlf_p),
reinterpret_cast<fftwf_complex*>(tmp_nlf_p));
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_adj_p),
reinterpret_cast<fftwf_complex*>(tmp_adj_p));
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_nlf_m),
reinterpret_cast<fftwf_complex*>(tmp_nlf_m));
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_adj_m),
reinterpret_cast<fftwf_complex*>(tmp_adj_m));
// upgoing: zero the positive frequencies, excluding DC and Nyquist
// dngoing: zero the negative frequencies, excluding DC and Nyquist
#pragma omp simd
for (long k = 1; k < nfft / 2; k++) {
tmp_nlf_p[nfft / 2 + k] = 0;
tmp_adj_p[kfft_adj + k] = 0;
tmp_nlf_m[nfft / 2 + k] = 0;
tmp_adj_m[kfft_adj + k] = 0;
}
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_nlf_p),
reinterpret_cast<fftwf_complex*>(tmp_nlf_p));
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_adj_p),
reinterpret_cast<fftwf_complex*>(tmp_adj_p));
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_nlf_m),
reinterpret_cast<fftwf_complex*>(tmp_nlf_m));
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_adj_m),
reinterpret_cast<fftwf_complex*>(tmp_adj_m));
// Faqi eq 10
// Applied to FWI: [Sup * Rdn]
// Applied to RTM: [Sup * Rup]
#pragma omp simd
for (long kz = 0; kz < _nz; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float factor = 2 * B / (V * V * V);
dVel[k] += factor * (real(tmp_nlf_p[kz] * tmp_adj_p[kz]) + real(tmp_nlf_m[kz] * tmp_adj_m[kz]));
}
} // end loop over ky
} // end loop over kx
} // end loop over by
} // end loop over bx
delete [] tmp_nlf_p;
delete [] tmp_adj_p;
delete [] tmp_nlf_m;
delete [] tmp_adj_m;
} // end parallel region
fftwf_destroy_plan(planForward);
fftwf_destroy_plan(planInverse);
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_VEA(float *dVel, float *dEps, float *dEta,
float *wavefieldP, float *wavefieldM, float *wavefieldDP, float *wavefieldDM) {
// Right side spatial derivatives for the adjoint accumulation
applyFirstDerivatives3D_TTI_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
wavefieldP, wavefieldP, wavefieldP, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpPg1a, _tmpPg2a, _tmpPg3a, _nbx, _nby, _nbz);
applyFirstDerivatives3D_TTI_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
wavefieldM, wavefieldM, wavefieldM, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpMg1a, _tmpMg2a, _tmpMg3a, _nbx, _nby, _nbz);
applyFirstDerivatives3D_TTI_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_pOld, _pOld, _pOld, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpPg1b, _tmpPg2b, _tmpPg3b, _nbx, _nby, _nbz);
applyFirstDerivatives3D_TTI_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_mOld, _mOld, _mOld, _sinTheta, _cosTheta, _sinPhi, _cosPhi, _tmpMg1b, _tmpMg2b, _tmpMg3b, _nbx, _nby, _nbz);
// Sandwich terms for the adjoint accumulation
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float V = _v[k];
const float E = _eps[k];
const float A = _eta[k];
const float B = _b[k];
const float F = _f[k];
const float factor = 2 * B / (V * V * V);
dVel[k] += factor * (wavefieldDP[k] * _pOld[k] + wavefieldDM[k] * _mOld[k]);
dEps[k] += (-2 * B * _tmpPg1a[k] * _tmpPg1b[k] - 2 * B * _tmpPg2a[k] * _tmpPg2b[k]);
const float partP = 2 * B * F * A * _tmpPg3a[k] - (B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpMg3a[k];
const float partM = 2 * B * F * A * _tmpMg3a[k] + (B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpPg3a[k];
dEta[k] += (partP * _tmpPg3b[k] - partM * _tmpMg3b[k]);
}
}
}
}
}
}
}
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyRotationSandwichRotation_TTI_FirstDerivatives3D_PlusHalf_TwoFields(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDy,
const Type invDz,
Type * __restrict__ inP,
Type * __restrict__ inM,
float * __restrict__ sinTheta,
float * __restrict__ cosTheta,
float * __restrict__ sinPhi,
float * __restrict__ cosPhi,
Type * __restrict__ fieldEps,
Type * __restrict__ fieldEta,
Type * __restrict__ fieldVsVp,
Type * __restrict__ fieldBuoy,
Type * __restrict__ outPx,
Type * __restrict__ outPy,
Type * __restrict__ outPz,
Type * __restrict__ outMx,
Type * __restrict__ outMy,
Type * __restrict__ outMz,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
const long nynz = ny * nz;
// zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long kindex1 = kx * ny * nz + ky * nz + k;
const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
outPx[kindex1] = outPx[kindex2] = 0;
outPy[kindex1] = outPy[kindex2] = 0;
outPz[kindex1] = outPz[kindex2] = 0;
outMx[kindex1] = outMx[kindex2] = 0;
outMy[kindex1] = outMy[kindex2] = 0;
outMz[kindex1] = outMz[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = kx * ny * nz + k * nz + kz;
const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
outPx[kindex1] = outPx[kindex2] = 0;
outPy[kindex1] = outPy[kindex2] = 0;
outPz[kindex1] = outPz[kindex2] = 0;
outMx[kindex1] = outMx[kindex2] = 0;
outMy[kindex1] = outMy[kindex2] = 0;
outMz[kindex1] = outMz[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
long kindex1 = k * ny * nz + ky * nz + kz;
long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
outPx[kindex1] = outPx[kindex2] = 0;
outPy[kindex1] = outPy[kindex2] = 0;
outPz[kindex1] = outPz[kindex2] = 0;
outMx[kindex1] = outMx[kindex2] = 0;
outMy[kindex1] = outMy[kindex2] = 0;
outMz[kindex1] = outMz[kindex2] = 0;
}
}
}
// interior
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
const long kxnynz = kx * nynz;
for (long ky = by; ky < kymax; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long kynz_kz = kynz + kz;
const Type stencilPDx =
c8_1 * (- inP[(kx+0) * nynz + kynz_kz] + inP[(kx+1) * nynz + kynz_kz]) +
c8_2 * (- inP[(kx-1) * nynz + kynz_kz] + inP[(kx+2) * nynz + kynz_kz]) +
c8_3 * (- inP[(kx-2) * nynz + kynz_kz] + inP[(kx+3) * nynz + kynz_kz]) +
c8_4 * (- inP[(kx-3) * nynz + kynz_kz] + inP[(kx+4) * nynz + kynz_kz]);
const Type stencilPDy =
c8_1 * (- inP[kxnynz + (ky+0) * nz + kz] + inP[kxnynz + (ky+1) * nz + kz]) +
c8_2 * (- inP[kxnynz + (ky-1) * nz + kz] + inP[kxnynz + (ky+2) * nz + kz]) +
c8_3 * (- inP[kxnynz + (ky-2) * nz + kz] + inP[kxnynz + (ky+3) * nz + kz]) +
c8_4 * (- inP[kxnynz + (ky-3) * nz + kz] + inP[kxnynz + (ky+4) * nz + kz]);
const Type stencilPDz =
c8_1 * (- inP[kxnynz_kynz + (kz+0)] + inP[kxnynz_kynz + (kz+1)]) +
c8_2 * (- inP[kxnynz_kynz + (kz-1)] + inP[kxnynz_kynz + (kz+2)]) +
c8_3 * (- inP[kxnynz_kynz + (kz-2)] + inP[kxnynz_kynz + (kz+3)]) +
c8_4 * (- inP[kxnynz_kynz + (kz-3)] + inP[kxnynz_kynz + (kz+4)]);
const Type stencilMDx =
c8_1 * (- inM[(kx+0) * nynz + kynz_kz] + inM[(kx+1) * nynz + kynz_kz]) +
c8_2 * (- inM[(kx-1) * nynz + kynz_kz] + inM[(kx+2) * nynz + kynz_kz]) +
c8_3 * (- inM[(kx-2) * nynz + kynz_kz] + inM[(kx+3) * nynz + kynz_kz]) +
c8_4 * (- inM[(kx-3) * nynz + kynz_kz] + inM[(kx+4) * nynz + kynz_kz]);
const Type stencilMDy =
c8_1 * (- inM[kxnynz + (ky+0) * nz + kz] + inM[kxnynz + (ky+1) * nz + kz]) +
c8_2 * (- inM[kxnynz + (ky-1) * nz + kz] + inM[kxnynz + (ky+2) * nz + kz]) +
c8_3 * (- inM[kxnynz + (ky-2) * nz + kz] + inM[kxnynz + (ky+3) * nz + kz]) +
c8_4 * (- inM[kxnynz + (ky-3) * nz + kz] + inM[kxnynz + (ky+4) * nz + kz]);
const Type stencilMDz =
c8_1 * (- inM[kxnynz_kynz + (kz+0)] + inM[kxnynz_kynz + (kz+1)]) +
c8_2 * (- inM[kxnynz_kynz + (kz-1)] + inM[kxnynz_kynz + (kz+2)]) +
c8_3 * (- inM[kxnynz_kynz + (kz-2)] + inM[kxnynz_kynz + (kz+3)]) +
c8_4 * (- inM[kxnynz_kynz + (kz-3)] + inM[kxnynz_kynz + (kz+4)]);
const Type dpdx = invDx * stencilPDx;
const Type dpdy = invDy * stencilPDy;
const Type dpdz = invDz * stencilPDz;
const Type dmdx = invDx * stencilMDx;
const Type dmdy = invDy * stencilMDy;
const Type dmdz = invDz * stencilMDz;
const long k = kx * ny * nz + ky * nz + kz;
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
const float sinThetaSinPhi = sinTheta[k] * sinPhi[k];
const Type fieldEta2 = fieldEta[k] * fieldEta[k];
const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k];
const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz;
const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz;
const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2);
const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M;
const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M;
const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]);
const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]);
outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP;
outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP;
outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP;
outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM;
outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM;
outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM;
}
}
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 4; ky < ny4; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
// kz = 0 -- 1/2 cell below free surface for Z derivative, at free surface for X/Y derivative
// X and Y derivatives are identically zero
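// The '+' signs in the Z stencils below encode the odd image condition:
// with p = 0 on the free surface, p(-kz) = -p(kz), so stencil samples
// that fall above the surface are folded back in with opposite sign.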
{
const Type stencilPDz0 =
c8_1 * (- inP[kxnynz_kynz + 0] + inP[kxnynz_kynz + 1]) +
c8_2 * (+ inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 2]) +
c8_3 * (+ inP[kxnynz_kynz + 2] + inP[kxnynz_kynz + 3]) +
c8_4 * (+ inP[kxnynz_kynz + 3] + inP[kxnynz_kynz + 4]);
const Type stencilMDz0 =
c8_1 * (- inM[kxnynz_kynz + 0] + inM[kxnynz_kynz + 1]) +
c8_2 * (+ inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 2]) +
c8_3 * (+ inM[kxnynz_kynz + 2] + inM[kxnynz_kynz + 3]) +
c8_4 * (+ inM[kxnynz_kynz + 3] + inM[kxnynz_kynz + 4]);
const Type dpdx = 0;
const Type dpdy = 0;
const Type dpdz = invDz * stencilPDz0;
const Type dmdx = 0;
const Type dmdy = 0;
const Type dmdz = invDz * stencilMDz0;
const long k = kx * ny * nz + ky * nz + 0;
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
const float sinThetaSinPhi = sinTheta[k] * sinPhi[k];
const Type fieldEta2 = fieldEta[k] * fieldEta[k];
const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k];
const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz;
const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz;
const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2);
const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M;
const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M;
const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]);
const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]);
outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP;
outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP;
outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP;
outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM;
outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM;
outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM;
}
// kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cell below for X/Y derivative
{
const Type stencilPDx1 =
c8_1 * (- inP[(kx+0) * nynz + kynz + 1] + inP[(kx+1) * nynz + kynz + 1]) +
c8_2 * (- inP[(kx-1) * nynz + kynz + 1] + inP[(kx+2) * nynz + kynz + 1]) +
c8_3 * (- inP[(kx-2) * nynz + kynz + 1] + inP[(kx+3) * nynz + kynz + 1]) +
c8_4 * (- inP[(kx-3) * nynz + kynz + 1] + inP[(kx+4) * nynz + kynz + 1]);
const Type stencilPDy1 =
c8_1 * (- inP[kxnynz + (ky+0) * nz + 1] + inP[kxnynz + (ky+1) * nz + 1]) +
c8_2 * (- inP[kxnynz + (ky-1) * nz + 1] + inP[kxnynz + (ky+2) * nz + 1]) +
c8_3 * (- inP[kxnynz + (ky-2) * nz + 1] + inP[kxnynz + (ky+3) * nz + 1]) +
c8_4 * (- inP[kxnynz + (ky-3) * nz + 1] + inP[kxnynz + (ky+4) * nz + 1]);
const Type stencilPDz1 =
c8_1 * (- inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 2]) +
c8_2 * (- inP[kxnynz_kynz + 0] + inP[kxnynz_kynz + 3]) +
c8_3 * (+ inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 4]) +
c8_4 * (+ inP[kxnynz_kynz + 2] + inP[kxnynz_kynz + 5]);
const Type stencilMDx1 =
c8_1 * (- inM[(kx+0) * nynz + kynz + 1] + inM[(kx+1) * nynz + kynz + 1]) +
c8_2 * (- inM[(kx-1) * nynz + kynz + 1] + inM[(kx+2) * nynz + kynz + 1]) +
c8_3 * (- inM[(kx-2) * nynz + kynz + 1] + inM[(kx+3) * nynz + kynz + 1]) +
c8_4 * (- inM[(kx-3) * nynz + kynz + 1] + inM[(kx+4) * nynz + kynz + 1]);
const Type stencilMDy1 =
c8_1 * (- inM[kxnynz + (ky+0) * nz + 1] + inM[kxnynz + (ky+1) * nz + 1]) +
c8_2 * (- inM[kxnynz + (ky-1) * nz + 1] + inM[kxnynz + (ky+2) * nz + 1]) +
c8_3 * (- inM[kxnynz + (ky-2) * nz + 1] + inM[kxnynz + (ky+3) * nz + 1]) +
c8_4 * (- inM[kxnynz + (ky-3) * nz + 1] + inM[kxnynz + (ky+4) * nz + 1]);
const Type stencilMDz1 =
c8_1 * (- inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 2]) +
c8_2 * (- inM[kxnynz_kynz + 0] + inM[kxnynz_kynz + 3]) +
c8_3 * (+ inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 4]) +
c8_4 * (+ inM[kxnynz_kynz + 2] + inM[kxnynz_kynz + 5]);
const Type dpdx = invDx * stencilPDx1;
const Type dpdy = invDy * stencilPDy1;
const Type dpdz = invDz * stencilPDz1;
const Type dmdx = invDx * stencilMDx1;
const Type dmdy = invDy * stencilMDy1;
const Type dmdz = invDz * stencilMDz1;
const long k = kx * ny * nz + ky * nz + 1;
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
const float sinThetaSinPhi = sinTheta[k] * sinPhi[k];
const Type fieldEta2 = fieldEta[k] * fieldEta[k];
const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k];
const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz;
const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz;
const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2);
const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M;
const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M;
const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]);
const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]);
outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP;
outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP;
outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP;
outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM;
outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM;
outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM;
}
// kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative
{
const Type stencilPDx2 =
c8_1 * (- inP[(kx+0) * nynz + kynz + 2] + inP[(kx+1) * nynz + kynz + 2]) +
c8_2 * (- inP[(kx-1) * nynz + kynz + 2] + inP[(kx+2) * nynz + kynz + 2]) +
c8_3 * (- inP[(kx-2) * nynz + kynz + 2] + inP[(kx+3) * nynz + kynz + 2]) +
c8_4 * (- inP[(kx-3) * nynz + kynz + 2] + inP[(kx+4) * nynz + kynz + 2]);
const Type stencilPDy2 =
c8_1 * (- inP[kxnynz + (ky+0) * nz + 2] + inP[kxnynz + (ky+1) * nz + 2]) +
c8_2 * (- inP[kxnynz + (ky-1) * nz + 2] + inP[kxnynz + (ky+2) * nz + 2]) +
c8_3 * (- inP[kxnynz + (ky-2) * nz + 2] + inP[kxnynz + (ky+3) * nz + 2]) +
c8_4 * (- inP[kxnynz + (ky-3) * nz + 2] + inP[kxnynz + (ky+4) * nz + 2]);
const Type stencilPDz2 =
c8_1 * (- inP[kxnynz_kynz + 2] + inP[kxnynz_kynz + 3]) +
c8_2 * (- inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 4]) +
c8_3 * (- inP[kxnynz_kynz + 0] + inP[kxnynz_kynz + 5]) +
c8_4 * (+ inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 6]);
const Type stencilMDx2 =
c8_1 * (- inM[(kx+0) * nynz + kynz + 2] + inM[(kx+1) * nynz + kynz + 2]) +
c8_2 * (- inM[(kx-1) * nynz + kynz + 2] + inM[(kx+2) * nynz + kynz + 2]) +
c8_3 * (- inM[(kx-2) * nynz + kynz + 2] + inM[(kx+3) * nynz + kynz + 2]) +
c8_4 * (- inM[(kx-3) * nynz + kynz + 2] + inM[(kx+4) * nynz + kynz + 2]);
const Type stencilMDy2 =
c8_1 * (- inM[kxnynz + (ky+0) * nz + 2] + inM[kxnynz + (ky+1) * nz + 2]) +
c8_2 * (- inM[kxnynz + (ky-1) * nz + 2] + inM[kxnynz + (ky+2) * nz + 2]) +
c8_3 * (- inM[kxnynz + (ky-2) * nz + 2] + inM[kxnynz + (ky+3) * nz + 2]) +
c8_4 * (- inM[kxnynz + (ky-3) * nz + 2] + inM[kxnynz + (ky+4) * nz + 2]);
const Type stencilMDz2 =
c8_1 * (- inM[kxnynz_kynz + 2] + inM[kxnynz_kynz + 3]) +
c8_2 * (- inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 4]) +
c8_3 * (- inM[kxnynz_kynz + 0] + inM[kxnynz_kynz + 5]) +
c8_4 * (+ inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 6]);
const Type dpdx = invDx * stencilPDx2;
const Type dpdy = invDy * stencilPDy2;
const Type dpdz = invDz * stencilPDz2;
const Type dmdx = invDx * stencilMDx2;
const Type dmdy = invDy * stencilMDy2;
const Type dmdz = invDz * stencilMDz2;
const long k = kx * ny * nz + ky * nz + 2;
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
const float sinThetaSinPhi = sinTheta[k] * sinPhi[k];
const Type fieldEta2 = fieldEta[k] * fieldEta[k];
const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k];
const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz;
const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz;
const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2);
const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M;
const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M;
const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]);
const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]);
outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP;
outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP;
outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP;
outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM;
outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM;
outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM;
}
// kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative
{
const Type stencilPDx3 =
c8_1 * (- inP[(kx+0) * nynz + kynz + 3] + inP[(kx+1) * nynz + kynz + 3]) +
c8_2 * (- inP[(kx-1) * nynz + kynz + 3] + inP[(kx+2) * nynz + kynz + 3]) +
c8_3 * (- inP[(kx-2) * nynz + kynz + 3] + inP[(kx+3) * nynz + kynz + 3]) +
c8_4 * (- inP[(kx-3) * nynz + kynz + 3] + inP[(kx+4) * nynz + kynz + 3]);
const Type stencilPDy3 =
c8_1 * (- inP[kxnynz + (ky+0) * nz + 3] + inP[kxnynz + (ky+1) * nz + 3]) +
c8_2 * (- inP[kxnynz + (ky-1) * nz + 3] + inP[kxnynz + (ky+2) * nz + 3]) +
c8_3 * (- inP[kxnynz + (ky-2) * nz + 3] + inP[kxnynz + (ky+3) * nz + 3]) +
c8_4 * (- inP[kxnynz + (ky-3) * nz + 3] + inP[kxnynz + (ky+4) * nz + 3]);
const Type stencilPDz3 =
c8_1 * (- inP[kxnynz_kynz + 3] + inP[kxnynz_kynz + 4]) +
c8_2 * (- inP[kxnynz_kynz + 2] + inP[kxnynz_kynz + 5]) +
c8_3 * (- inP[kxnynz_kynz + 1] + inP[kxnynz_kynz + 6]) +
c8_4 * (- inP[kxnynz_kynz + 0] + inP[kxnynz_kynz + 7]);
const Type stencilMDx3 =
c8_1 * (- inM[(kx+0) * nynz + kynz + 3] + inM[(kx+1) * nynz + kynz + 3]) +
c8_2 * (- inM[(kx-1) * nynz + kynz + 3] + inM[(kx+2) * nynz + kynz + 3]) +
c8_3 * (- inM[(kx-2) * nynz + kynz + 3] + inM[(kx+3) * nynz + kynz + 3]) +
c8_4 * (- inM[(kx-3) * nynz + kynz + 3] + inM[(kx+4) * nynz + kynz + 3]);
const Type stencilMDy3 =
c8_1 * (- inM[kxnynz + (ky+0) * nz + 3] + inM[kxnynz + (ky+1) * nz + 3]) +
c8_2 * (- inM[kxnynz + (ky-1) * nz + 3] + inM[kxnynz + (ky+2) * nz + 3]) +
c8_3 * (- inM[kxnynz + (ky-2) * nz + 3] + inM[kxnynz + (ky+3) * nz + 3]) +
c8_4 * (- inM[kxnynz + (ky-3) * nz + 3] + inM[kxnynz + (ky+4) * nz + 3]);
const Type stencilMDz3 =
c8_1 * (- inM[kxnynz_kynz + 3] + inM[kxnynz_kynz + 4]) +
c8_2 * (- inM[kxnynz_kynz + 2] + inM[kxnynz_kynz + 5]) +
c8_3 * (- inM[kxnynz_kynz + 1] + inM[kxnynz_kynz + 6]) +
c8_4 * (- inM[kxnynz_kynz + 0] + inM[kxnynz_kynz + 7]);
const Type dpdx = invDx * stencilPDx3;
const Type dpdy = invDy * stencilPDy3;
const Type dpdz = invDz * stencilPDz3;
const Type dmdx = invDx * stencilMDx3;
const Type dmdy = invDy * stencilMDy3;
const Type dmdz = invDz * stencilMDz3;
const long k = kx * ny * nz + ky * nz + 3;
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
const float sinThetaSinPhi = sinTheta[k] * sinPhi[k];
const Type fieldEta2 = fieldEta[k] * fieldEta[k];
const Type fieldBuoyVsVp = fieldBuoy[k] * fieldVsVp[k];
const Type g3P = sinThetaCosPhi * dpdx + sinThetaSinPhi * dpdy + cosTheta[k] * dpdz;
const Type g3M = sinThetaCosPhi * dmdx + sinThetaSinPhi * dmdy + cosTheta[k] * dmdz;
const Type tmpFE = fieldBuoyVsVp * fieldEta[k] * sqrt(1 - fieldEta2);
const Type tmpP = - fieldBuoy[k] * (2 * fieldEps[k] + fieldVsVp[k] * fieldEta2) * g3P + tmpFE * g3M;
const Type tmpM = tmpFE * g3P + fieldBuoyVsVp * fieldEta2 * g3M;
const Type tmpE = fieldBuoy[k] * (1 + 2 * fieldEps[k]);
const Type tmpF = fieldBuoy[k] * (1 - fieldVsVp[k]);
outPx[k] = tmpE * dpdx + sinThetaCosPhi * tmpP;
outPy[k] = tmpE * dpdy + sinThetaSinPhi * tmpP;
outPz[k] = tmpE * dpdz + cosTheta[k] * tmpP;
outMx[k] = tmpF * dmdx + sinThetaCosPhi * tmpM;
outMy[k] = tmpF * dmdy + sinThetaSinPhi * tmpM;
outMz[k] = tmpF * dmdz + cosTheta[k] * tmpM;
}
}
}
}
}
/**
* Combines
* applyFirstDerivatives_MinusHalf(P)
* secondOrderTimeUpdate_BubeConservation(P)
* applyFirstDerivatives_MinusHalf(M)
* secondOrderTimeUpdate_BubeConservation(M)
*
* Updates pOld and mOld with second order time update
* see notes in method secondOrderTimeUpdate_BubeConservation()
*
* Nonlinear method: outputs the spatial derivatives for serialization
* Linear method: does not output the spatial derivatives
*/
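// Per grid point the interior loop computes
//   pSpace = dPx + dPy + dPz   (divergence of the sandwiched gradient)
//   pOld  <- 2*pCur - pOld + dt^2*(v^2/b)*pSpace - dtOmegaInvQ*(pCur - pOld)
// (and likewise for the M field); the caller then swaps pOld/mOld into
// pCur/mCur.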
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives3D_MinusHalf_TimeUpdate_Nonlinear(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDy,
const Type invDz,
const Type dtMod,
const Type * __restrict__ const tmpPX,
const Type * __restrict__ const tmpPY,
const Type * __restrict__ const tmpPZ,
const Type * __restrict__ const tmpMX,
const Type * __restrict__ const tmpMY,
const Type * __restrict__ const tmpMZ,
const Type * __restrict__ const fieldVel,
const Type * __restrict__ const fieldBuoy,
const Type * __restrict__ const dtOmegaInvQ,
const Type * __restrict__ const pCur,
const Type * __restrict__ const mCur,
Type * __restrict__ pSpace,
Type * __restrict__ mSpace,
Type * __restrict__ pOld,
Type * __restrict__ mOld,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
const long nynz = ny * nz;
const Type dt2 = dtMod * dtMod;
// zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long kindex1 = kx * ny * nz + ky * nz + k;
const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
pSpace[kindex1] = pSpace[kindex2] = 0;
mSpace[kindex1] = mSpace[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = kx * ny * nz + k * nz + kz;
const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
pSpace[kindex1] = pSpace[kindex2] = 0;
mSpace[kindex1] = mSpace[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = k * ny * nz + ky * nz + kz;
const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
pSpace[kindex1] = pSpace[kindex2] = 0;
mSpace[kindex1] = mSpace[kindex2] = 0;
}
}
}
// interior
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
const long kxnynz = kx * nynz;
for (long ky = by; ky < kymax; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kxnynz_kynz + kz;
const long kynz_kz = kynz + kz;
const Type stencilDPx =
c8_1 * (- tmpPX[(kx-1) * nynz + kynz_kz] + tmpPX[(kx+0) * nynz + kynz_kz]) +
c8_2 * (- tmpPX[(kx-2) * nynz + kynz_kz] + tmpPX[(kx+1) * nynz + kynz_kz]) +
c8_3 * (- tmpPX[(kx-3) * nynz + kynz_kz] + tmpPX[(kx+2) * nynz + kynz_kz]) +
c8_4 * (- tmpPX[(kx-4) * nynz + kynz_kz] + tmpPX[(kx+3) * nynz + kynz_kz]);
const Type stencilDPy =
c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + kz] + tmpPY[kxnynz + (ky+0) * nz + kz]) +
c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + kz] + tmpPY[kxnynz + (ky+1) * nz + kz]) +
c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + kz] + tmpPY[kxnynz + (ky+2) * nz + kz]) +
c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + kz] + tmpPY[kxnynz + (ky+3) * nz + kz]);
const Type stencilDPz =
c8_1 * (- tmpPZ[kxnynz_kynz + (kz-1)] + tmpPZ[kxnynz_kynz + (kz+0)]) +
c8_2 * (- tmpPZ[kxnynz_kynz + (kz-2)] + tmpPZ[kxnynz_kynz + (kz+1)]) +
c8_3 * (- tmpPZ[kxnynz_kynz + (kz-3)] + tmpPZ[kxnynz_kynz + (kz+2)]) +
c8_4 * (- tmpPZ[kxnynz_kynz + (kz-4)] + tmpPZ[kxnynz_kynz + (kz+3)]);
const Type stencilDMx =
c8_1 * (- tmpMX[(kx-1) * nynz + kynz_kz] + tmpMX[(kx+0) * nynz + kynz_kz]) +
c8_2 * (- tmpMX[(kx-2) * nynz + kynz_kz] + tmpMX[(kx+1) * nynz + kynz_kz]) +
c8_3 * (- tmpMX[(kx-3) * nynz + kynz_kz] + tmpMX[(kx+2) * nynz + kynz_kz]) +
c8_4 * (- tmpMX[(kx-4) * nynz + kynz_kz] + tmpMX[(kx+3) * nynz + kynz_kz]);
const Type stencilDMy =
c8_1 * (- tmpMY[kxnynz + (ky-1) * nz + kz] + tmpMY[kxnynz + (ky+0) * nz + kz]) +
c8_2 * (- tmpMY[kxnynz + (ky-2) * nz + kz] + tmpMY[kxnynz + (ky+1) * nz + kz]) +
c8_3 * (- tmpMY[kxnynz + (ky-3) * nz + kz] + tmpMY[kxnynz + (ky+2) * nz + kz]) +
c8_4 * (- tmpMY[kxnynz + (ky-4) * nz + kz] + tmpMY[kxnynz + (ky+3) * nz + kz]);
const Type stencilDMz =
c8_1 * (- tmpMZ[kxnynz_kynz + (kz-1)] + tmpMZ[kxnynz_kynz + (kz+0)]) +
c8_2 * (- tmpMZ[kxnynz_kynz + (kz-2)] + tmpMZ[kxnynz_kynz + (kz+1)]) +
c8_3 * (- tmpMZ[kxnynz_kynz + (kz-3)] + tmpMZ[kxnynz_kynz + (kz+2)]) +
c8_4 * (- tmpMZ[kxnynz_kynz + (kz-4)] + tmpMZ[kxnynz_kynz + (kz+3)]);
const Type dPx = invDx * stencilDPx;
const Type dPy = invDy * stencilDPy;
const Type dPz = invDz * stencilDPz;
const Type dMx = invDx * stencilDMx;
const Type dMy = invDy * stencilDMy;
const Type dMz = invDz * stencilDMz;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPx + dPy + dPz;
mSpace[k] = dMx + dMy + dMz;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
}
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 4; ky < ny4; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
// kz = 0 -- at the free surface -- p = 0
// [kxnynz_kynz + 0]
{
const Type dPx = 0;
const Type dPy = 0;
const Type dPz = 0;
const Type dMx = 0;
const Type dMy = 0;
const Type dMz = 0;
const long k = kxnynz_kynz + 0;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPy + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * (dMx + dMy + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
pSpace[k] = dPx + dPy + dPz;
mSpace[k] = dMx + dMy + dMz;
}
// kz = 1 -- one cell below the free surface
// [kxnynz_kynz + 1]
{
const Type stencilDPx1 =
c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 1] + tmpPX[(kx+0) * nynz + kynz + 1]) +
c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 1] + tmpPX[(kx+1) * nynz + kynz + 1]) +
c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 1] + tmpPX[(kx+2) * nynz + kynz + 1]) +
c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 1] + tmpPX[(kx+3) * nynz + kynz + 1]);
const Type stencilDPy1 =
c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 1] + tmpPY[kxnynz + (ky+0) * nz + 1]) +
c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 1] + tmpPY[kxnynz + (ky+1) * nz + 1]) +
c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 1] + tmpPY[kxnynz + (ky+2) * nz + 1]) +
c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 1] + tmpPY[kxnynz + (ky+3) * nz + 1]);
const Type stencilDPz1 =
c8_1 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 1]) +
c8_2 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 2]) +
c8_3 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 3]) +
c8_4 * (- tmpPZ[kxnynz_kynz + 2] + tmpPZ[kxnynz_kynz + 4]);
const Type stencilDMx1 =
c8_1 * (- tmpMX[(kx-1) * nynz + kynz + 1] + tmpMX[(kx+0) * nynz + kynz + 1]) +
c8_2 * (- tmpMX[(kx-2) * nynz + kynz + 1] + tmpMX[(kx+1) * nynz + kynz + 1]) +
c8_3 * (- tmpMX[(kx-3) * nynz + kynz + 1] + tmpMX[(kx+2) * nynz + kynz + 1]) +
c8_4 * (- tmpMX[(kx-4) * nynz + kynz + 1] + tmpMX[(kx+3) * nynz + kynz + 1]);
const Type stencilDMy1 =
c8_1 * (- tmpMY[kxnynz + (ky-1) * nz + 1] + tmpMY[kxnynz + (ky+0) * nz + 1]) +
c8_2 * (- tmpMY[kxnynz + (ky-2) * nz + 1] + tmpMY[kxnynz + (ky+1) * nz + 1]) +
c8_3 * (- tmpMY[kxnynz + (ky-3) * nz + 1] + tmpMY[kxnynz + (ky+2) * nz + 1]) +
c8_4 * (- tmpMY[kxnynz + (ky-4) * nz + 1] + tmpMY[kxnynz + (ky+3) * nz + 1]);
const Type stencilDMz1 =
c8_1 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 1]) +
c8_2 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 2]) +
c8_3 * (- tmpMZ[kxnynz_kynz + 1] + tmpMZ[kxnynz_kynz + 3]) +
c8_4 * (- tmpMZ[kxnynz_kynz + 2] + tmpMZ[kxnynz_kynz + 4]);
const Type dPx = invDx * stencilDPx1;
const Type dPy = invDy * stencilDPy1;
const Type dPz = invDz * stencilDPz1;
const Type dMx = invDx * stencilDMx1;
const Type dMy = invDy * stencilDMy1;
const Type dMz = invDz * stencilDMz1;
const long k = kxnynz_kynz + 1;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPx + dPy + dPz;
mSpace[k] = dMx + dMy + dMz;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 2 -- two cells below the free surface
// [kxnynz_kynz + 2]
{
const Type stencilDPx2 =
c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 2] + tmpPX[(kx+0) * nynz + kynz + 2]) +
c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 2] + tmpPX[(kx+1) * nynz + kynz + 2]) +
c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 2] + tmpPX[(kx+2) * nynz + kynz + 2]) +
c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 2] + tmpPX[(kx+3) * nynz + kynz + 2]);
const Type stencilDPy2 =
c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 2] + tmpPY[kxnynz + (ky+0) * nz + 2]) +
c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 2] + tmpPY[kxnynz + (ky+1) * nz + 2]) +
c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 2] + tmpPY[kxnynz + (ky+2) * nz + 2]) +
c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 2] + tmpPY[kxnynz + (ky+3) * nz + 2]);
const Type stencilDPz2 =
c8_1 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 2]) +
c8_2 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 3]) +
c8_3 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 4]) +
c8_4 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 5]);
const Type stencilDMx2 =
c8_1 * (- tmpMX[(kx-1) * nynz + kynz + 2] + tmpMX[(kx+0) * nynz + kynz + 2]) +
c8_2 * (- tmpMX[(kx-2) * nynz + kynz + 2] + tmpMX[(kx+1) * nynz + kynz + 2]) +
c8_3 * (- tmpMX[(kx-3) * nynz + kynz + 2] + tmpMX[(kx+2) * nynz + kynz + 2]) +
c8_4 * (- tmpMX[(kx-4) * nynz + kynz + 2] + tmpMX[(kx+3) * nynz + kynz + 2]);
const Type stencilDMy2 =
c8_1 * (- tmpMY[kxnynz + (ky-1) * nz + 2] + tmpMY[kxnynz + (ky+0) * nz + 2]) +
c8_2 * (- tmpMY[kxnynz + (ky-2) * nz + 2] + tmpMY[kxnynz + (ky+1) * nz + 2]) +
c8_3 * (- tmpMY[kxnynz + (ky-3) * nz + 2] + tmpMY[kxnynz + (ky+2) * nz + 2]) +
c8_4 * (- tmpMY[kxnynz + (ky-4) * nz + 2] + tmpMY[kxnynz + (ky+3) * nz + 2]);
const Type stencilDMz2 =
c8_1 * (- tmpMZ[kxnynz_kynz + 1] + tmpMZ[kxnynz_kynz + 2]) +
c8_2 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 3]) +
c8_3 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 4]) +
c8_4 * (- tmpMZ[kxnynz_kynz + 1] + tmpMZ[kxnynz_kynz + 5]);
const Type dPx = invDx * stencilDPx2;
const Type dPy = invDy * stencilDPy2;
const Type dPz = invDz * stencilDPz2;
const Type dMx = invDx * stencilDMx2;
const Type dMy = invDy * stencilDMy2;
const Type dMz = invDz * stencilDMz2;
const long k = kxnynz_kynz + 2;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPx + dPy + dPz;
mSpace[k] = dMx + dMy + dMz;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 3 -- three cells below the free surface
// [kxnynz_kynz + 3]
{
const Type stencilDPx3 =
c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 3] + tmpPX[(kx+0) * nynz + kynz + 3]) +
c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 3] + tmpPX[(kx+1) * nynz + kynz + 3]) +
c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 3] + tmpPX[(kx+2) * nynz + kynz + 3]) +
c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 3] + tmpPX[(kx+3) * nynz + kynz + 3]);
const Type stencilDPy3 =
c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 3] + tmpPY[kxnynz + (ky+0) * nz + 3]) +
c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 3] + tmpPY[kxnynz + (ky+1) * nz + 3]) +
c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 3] + tmpPY[kxnynz + (ky+2) * nz + 3]) +
c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 3] + tmpPY[kxnynz + (ky+3) * nz + 3]);
const Type stencilDPz3 =
c8_1 * (- tmpPZ[kxnynz_kynz + 2] + tmpPZ[kxnynz_kynz + 3]) +
c8_2 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 4]) +
c8_3 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 5]) +
c8_4 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 6]);
const Type stencilDMx3 =
c8_1 * (- tmpMX[(kx-1) * nynz + kynz + 3] + tmpMX[(kx+0) * nynz + kynz + 3]) +
c8_2 * (- tmpMX[(kx-2) * nynz + kynz + 3] + tmpMX[(kx+1) * nynz + kynz + 3]) +
c8_3 * (- tmpMX[(kx-3) * nynz + kynz + 3] + tmpMX[(kx+2) * nynz + kynz + 3]) +
c8_4 * (- tmpMX[(kx-4) * nynz + kynz + 3] + tmpMX[(kx+3) * nynz + kynz + 3]);
const Type stencilDMy3 =
c8_1 * (- tmpMY[kxnynz + (ky-1) * nz + 3] + tmpMY[kxnynz + (ky+0) * nz + 3]) +
c8_2 * (- tmpMY[kxnynz + (ky-2) * nz + 3] + tmpMY[kxnynz + (ky+1) * nz + 3]) +
c8_3 * (- tmpMY[kxnynz + (ky-3) * nz + 3] + tmpMY[kxnynz + (ky+2) * nz + 3]) +
c8_4 * (- tmpMY[kxnynz + (ky-4) * nz + 3] + tmpMY[kxnynz + (ky+3) * nz + 3]);
const Type stencilDMz3 =
c8_1 * (- tmpMZ[kxnynz_kynz + 2] + tmpMZ[kxnynz_kynz + 3]) +
c8_2 * (- tmpMZ[kxnynz_kynz + 1] + tmpMZ[kxnynz_kynz + 4]) +
c8_3 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 5]) +
c8_4 * (- tmpMZ[kxnynz_kynz + 0] + tmpMZ[kxnynz_kynz + 6]);
const Type dPx = invDx * stencilDPx3;
const Type dPy = invDy * stencilDPy3;
const Type dPz = invDz * stencilDPz3;
const Type dMx = invDx * stencilDMx3;
const Type dMy = invDy * stencilDMy3;
const Type dMz = invDz * stencilDMz3;
const long k = kxnynz_kynz + 3;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPx + dPy + dPz;
mSpace[k] = dMx + dMy + dMz;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
}
}
}
}
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives3D_TTI_PlusHalf_Sandwich(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDy,
const Type invDz,
Type * __restrict__ inP_G1,
Type * __restrict__ inP_G2,
Type * __restrict__ inP_G3,
Type * __restrict__ inM_G1,
Type * __restrict__ inM_G2,
Type * __restrict__ inM_G3,
Type * __restrict__ fieldEps,
Type * __restrict__ fieldEta,
Type * __restrict__ fieldVsVp,
Type * __restrict__ fieldBuoy,
float * __restrict__ sinTheta,
float * __restrict__ cosTheta,
float * __restrict__ sinPhi,
float * __restrict__ cosPhi,
Type * __restrict__ outP_G1,
Type * __restrict__ outP_G2,
Type * __restrict__ outP_G3,
Type * __restrict__ outM_G1,
Type * __restrict__ outM_G2,
Type * __restrict__ outM_G3,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
const long nynz = ny * nz;
// zero the output arrays: only the 4-point-wide annulus inside the absorbing boundary needs to be zeroed
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
long kindex1 = kx * ny * nz + ky * nz + k;
long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
outP_G1[kindex1] = outP_G1[kindex2] = 0;
outP_G2[kindex1] = outP_G2[kindex2] = 0;
outP_G3[kindex1] = outP_G3[kindex2] = 0;
outM_G1[kindex1] = outM_G1[kindex2] = 0;
outM_G2[kindex1] = outM_G2[kindex2] = 0;
outM_G3[kindex1] = outM_G3[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
long kindex1 = kx * ny * nz + k * nz + kz;
long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
outP_G1[kindex1] = outP_G1[kindex2] = 0;
outP_G2[kindex1] = outP_G2[kindex2] = 0;
outP_G3[kindex1] = outP_G3[kindex2] = 0;
outM_G1[kindex1] = outM_G1[kindex2] = 0;
outM_G2[kindex1] = outM_G2[kindex2] = 0;
outM_G3[kindex1] = outM_G3[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
long kindex1 = k * ny * nz + ky * nz + kz;
long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
outP_G1[kindex1] = outP_G1[kindex2] = 0;
outP_G2[kindex1] = outP_G2[kindex2] = 0;
outP_G3[kindex1] = outP_G3[kindex2] = 0;
outM_G1[kindex1] = outM_G1[kindex2] = 0;
outM_G2[kindex1] = outM_G2[kindex2] = 0;
outM_G3[kindex1] = outM_G3[kindex2] = 0;
}
}
}
// interior
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
const long kxnynz = kx * nynz;
for (long ky = by; ky < kymax; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long kynz_kz = kynz + kz;
const Type stencilP_G1 =
c8_1 * (- inP_G1[(kx+0) * nynz + kynz_kz] + inP_G1[(kx+1) * nynz + kynz_kz]) +
c8_2 * (- inP_G1[(kx-1) * nynz + kynz_kz] + inP_G1[(kx+2) * nynz + kynz_kz]) +
c8_3 * (- inP_G1[(kx-2) * nynz + kynz_kz] + inP_G1[(kx+3) * nynz + kynz_kz]) +
c8_4 * (- inP_G1[(kx-3) * nynz + kynz_kz] + inP_G1[(kx+4) * nynz + kynz_kz]);
const Type stencilP_G2 =
c8_1 * (- inP_G2[kxnynz + (ky+0) * nz + kz] + inP_G2[kxnynz + (ky+1) * nz + kz]) +
c8_2 * (- inP_G2[kxnynz + (ky-1) * nz + kz] + inP_G2[kxnynz + (ky+2) * nz + kz]) +
c8_3 * (- inP_G2[kxnynz + (ky-2) * nz + kz] + inP_G2[kxnynz + (ky+3) * nz + kz]) +
c8_4 * (- inP_G2[kxnynz + (ky-3) * nz + kz] + inP_G2[kxnynz + (ky+4) * nz + kz]);
const Type stencilP_G3 =
c8_1 * (- inP_G3[kxnynz_kynz + (kz+0)] + inP_G3[kxnynz_kynz + (kz+1)]) +
c8_2 * (- inP_G3[kxnynz_kynz + (kz-1)] + inP_G3[kxnynz_kynz + (kz+2)]) +
c8_3 * (- inP_G3[kxnynz_kynz + (kz-2)] + inP_G3[kxnynz_kynz + (kz+3)]) +
c8_4 * (- inP_G3[kxnynz_kynz + (kz-3)] + inP_G3[kxnynz_kynz + (kz+4)]);
const Type stencilM_G1 =
c8_1 * (- inM_G1[(kx+0) * nynz + kynz_kz] + inM_G1[(kx+1) * nynz + kynz_kz]) +
c8_2 * (- inM_G1[(kx-1) * nynz + kynz_kz] + inM_G1[(kx+2) * nynz + kynz_kz]) +
c8_3 * (- inM_G1[(kx-2) * nynz + kynz_kz] + inM_G1[(kx+3) * nynz + kynz_kz]) +
c8_4 * (- inM_G1[(kx-3) * nynz + kynz_kz] + inM_G1[(kx+4) * nynz + kynz_kz]);
const Type stencilM_G2 =
c8_1 * (- inM_G2[kxnynz + (ky+0) * nz + kz] + inM_G2[kxnynz + (ky+1) * nz + kz]) +
c8_2 * (- inM_G2[kxnynz + (ky-1) * nz + kz] + inM_G2[kxnynz + (ky+2) * nz + kz]) +
c8_3 * (- inM_G2[kxnynz + (ky-2) * nz + kz] + inM_G2[kxnynz + (ky+3) * nz + kz]) +
c8_4 * (- inM_G2[kxnynz + (ky-3) * nz + kz] + inM_G2[kxnynz + (ky+4) * nz + kz]);
const Type stencilM_G3 =
c8_1 * (- inM_G3[kxnynz_kynz + (kz+0)] + inM_G3[kxnynz_kynz + (kz+1)]) +
c8_2 * (- inM_G3[kxnynz_kynz + (kz-1)] + inM_G3[kxnynz_kynz + (kz+2)]) +
c8_3 * (- inM_G3[kxnynz_kynz + (kz-2)] + inM_G3[kxnynz_kynz + (kz+3)]) +
c8_4 * (- inM_G3[kxnynz_kynz + (kz-3)] + inM_G3[kxnynz_kynz + (kz+4)]);
const Type dpx = invDx * stencilP_G1;
const Type dpy = invDy * stencilP_G2;
const Type dpz = invDz * stencilP_G3;
const Type dmx = invDx * stencilM_G1;
const Type dmy = invDy * stencilM_G2;
const Type dmz = invDz * stencilM_G3;
long k = kxnynz_kynz + kz;
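// material terms at sample k -- assumed meanings (not stated in this file):
// fieldEps = Thomsen epsilon, fieldEta = anellipticity eta,
// fieldVsVp = f = vs^2/vp^2, fieldBuoy = buoyancy 1/rho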
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
const Type SA2 = sqrt(1 - A * A);
const float cosThetaCosPhi = cosTheta[k] * cosPhi[k];
const float cosThetaSinPhi = cosTheta[k] * sinPhi[k];
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
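// rotate the Cartesian derivatives into the tilted frame via R(theta, phi):
//   g1 = cosT cosP dx + cosT sinP dy - sinT dz
//   g2 = -sinP dx + cosP dy
//   g3 = sinT cosP dx + sinT sinP dy + cosT dz   (g3 along the symmetry axis)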
Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz;
Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy;
Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz;
Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz;
Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy;
Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz;
// combine terms for application of adjoint g3
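// G1/G2 are scaled by B E = B (1 + 2 eps) for P and by B (1 - f) for M, while
// the G3 components couple P and M through the symmetric 2x2 matrix
//   B * [ 1 - f A^2          f A sqrt(1 - A^2) ]
//       [ f A sqrt(1 - A^2)  1 - f + f A^2     ]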
outP_G1[k] = B * E * dPg1;
outP_G2[k] = B * E * dPg2;
outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3;
outM_G1[k] = B * (1 - F) * dMg1;
outM_G2[k] = B * (1 - F) * dMg2;
outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3;
}
}
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 4; ky < ny4; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
// kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative
// X and Y derivatives are identically zero
{
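// taps above the surface use the antisymmetric image in_G3[-n] = -in_G3[n],
// which flips the mirrored minus-taps of the interior stencil into the
// plus-signed terms below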
const Type stencilP_G3 =
c8_1 * (- inP_G3[kxnynz_kynz + 0] + inP_G3[kxnynz_kynz + 1]) +
c8_2 * (+ inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 2]) +
c8_3 * (+ inP_G3[kxnynz_kynz + 2] + inP_G3[kxnynz_kynz + 3]) +
c8_4 * (+ inP_G3[kxnynz_kynz + 3] + inP_G3[kxnynz_kynz + 4]);
const Type stencilM_G3 =
c8_1 * (- inM_G3[kxnynz_kynz + 0] + inM_G3[kxnynz_kynz + 1]) +
c8_2 * (+ inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 2]) +
c8_3 * (+ inM_G3[kxnynz_kynz + 2] + inM_G3[kxnynz_kynz + 3]) +
c8_4 * (+ inM_G3[kxnynz_kynz + 3] + inM_G3[kxnynz_kynz + 4]);
const Type dpx = 0;
const Type dpy = 0;
const Type dpz = invDz * stencilP_G3;
const Type dmx = 0;
const Type dmy = 0;
const Type dmz = invDz * stencilM_G3;
const long k = kxnynz_kynz + 0;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
const Type SA2 = sqrt(1 - A * A);
const float cosThetaCosPhi = cosTheta[k] * cosPhi[k];
const float cosThetaSinPhi = cosTheta[k] * sinPhi[k];
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz;
Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy;
Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz;
Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz;
Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy;
Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz;
// combine terms for application of adjoint g3
outP_G1[k] = B * E * dPg1;
outP_G2[k] = B * E * dPg2;
outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3;
outM_G1[k] = B * (1 - F) * dMg1;
outM_G2[k] = B * (1 - F) * dMg2;
outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3;
}
// kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cell below for X/Y derivative
{
const Type stencilP_G11 =
c8_1 * (- inP_G1[(kx+0) * nynz + kynz + 1] + inP_G1[(kx+1) * nynz + kynz + 1]) +
c8_2 * (- inP_G1[(kx-1) * nynz + kynz + 1] + inP_G1[(kx+2) * nynz + kynz + 1]) +
c8_3 * (- inP_G1[(kx-2) * nynz + kynz + 1] + inP_G1[(kx+3) * nynz + kynz + 1]) +
c8_4 * (- inP_G1[(kx-3) * nynz + kynz + 1] + inP_G1[(kx+4) * nynz + kynz + 1]);
const Type stencilP_G21 =
c8_1 * (- inP_G2[kxnynz + (ky+0) * nz + 1] + inP_G2[kxnynz + (ky+1) * nz + 1]) +
c8_2 * (- inP_G2[kxnynz + (ky-1) * nz + 1] + inP_G2[kxnynz + (ky+2) * nz + 1]) +
c8_3 * (- inP_G2[kxnynz + (ky-2) * nz + 1] + inP_G2[kxnynz + (ky+3) * nz + 1]) +
c8_4 * (- inP_G2[kxnynz + (ky-3) * nz + 1] + inP_G2[kxnynz + (ky+4) * nz + 1]);
const Type stencilP_G31 =
c8_1 * (- inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 2]) +
c8_2 * (- inP_G3[kxnynz_kynz + 0] + inP_G3[kxnynz_kynz + 3]) +
c8_3 * (+ inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 4]) +
c8_4 * (+ inP_G3[kxnynz_kynz + 2] + inP_G3[kxnynz_kynz + 5]);
const Type stencilM_G11 =
c8_1 * (- inM_G1[(kx+0) * nynz + kynz + 1] + inM_G1[(kx+1) * nynz + kynz + 1]) +
c8_2 * (- inM_G1[(kx-1) * nynz + kynz + 1] + inM_G1[(kx+2) * nynz + kynz + 1]) +
c8_3 * (- inM_G1[(kx-2) * nynz + kynz + 1] + inM_G1[(kx+3) * nynz + kynz + 1]) +
c8_4 * (- inM_G1[(kx-3) * nynz + kynz + 1] + inM_G1[(kx+4) * nynz + kynz + 1]);
const Type stencilM_G21 =
c8_1 * (- inM_G2[kxnynz + (ky+0) * nz + 1] + inM_G2[kxnynz + (ky+1) * nz + 1]) +
c8_2 * (- inM_G2[kxnynz + (ky-1) * nz + 1] + inM_G2[kxnynz + (ky+2) * nz + 1]) +
c8_3 * (- inM_G2[kxnynz + (ky-2) * nz + 1] + inM_G2[kxnynz + (ky+3) * nz + 1]) +
c8_4 * (- inM_G2[kxnynz + (ky-3) * nz + 1] + inM_G2[kxnynz + (ky+4) * nz + 1]);
const Type stencilM_G31 =
c8_1 * (- inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 2]) +
c8_2 * (- inM_G3[kxnynz_kynz + 0] + inM_G3[kxnynz_kynz + 3]) +
c8_3 * (+ inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 4]) +
c8_4 * (+ inM_G3[kxnynz_kynz + 2] + inM_G3[kxnynz_kynz + 5]);
const Type dpx = invDx * stencilP_G11;
const Type dpy = invDy * stencilP_G21;
const Type dpz = invDz * stencilP_G31;
const Type dmx = invDx * stencilM_G11;
const Type dmy = invDy * stencilM_G21;
const Type dmz = invDz * stencilM_G31;
const long k = kxnynz_kynz + 1;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
const Type SA2 = sqrt(1 - A * A);
const float cosThetaCosPhi = cosTheta[k] * cosPhi[k];
const float cosThetaSinPhi = cosTheta[k] * sinPhi[k];
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz;
Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy;
Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz;
Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz;
Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy;
Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz;
// combine terms for application of adjoint g3
outP_G1[k] = B * E * dPg1;
outP_G2[k] = B * E * dPg2;
outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3;
outM_G1[k] = B * (1 - F) * dMg1;
outM_G2[k] = B * (1 - F) * dMg2;
outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3;
}
// kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative
{
const Type stencilP_G12 =
c8_1 * (- inP_G1[(kx+0) * nynz + kynz + 2] + inP_G1[(kx+1) * nynz + kynz + 2]) +
c8_2 * (- inP_G1[(kx-1) * nynz + kynz + 2] + inP_G1[(kx+2) * nynz + kynz + 2]) +
c8_3 * (- inP_G1[(kx-2) * nynz + kynz + 2] + inP_G1[(kx+3) * nynz + kynz + 2]) +
c8_4 * (- inP_G1[(kx-3) * nynz + kynz + 2] + inP_G1[(kx+4) * nynz + kynz + 2]);
const Type stencilP_G22 =
c8_1 * (- inP_G2[kxnynz + (ky+0) * nz + 2] + inP_G2[kxnynz + (ky+1) * nz + 2]) +
c8_2 * (- inP_G2[kxnynz + (ky-1) * nz + 2] + inP_G2[kxnynz + (ky+2) * nz + 2]) +
c8_3 * (- inP_G2[kxnynz + (ky-2) * nz + 2] + inP_G2[kxnynz + (ky+3) * nz + 2]) +
c8_4 * (- inP_G2[kxnynz + (ky-3) * nz + 2] + inP_G2[kxnynz + (ky+4) * nz + 2]);
const Type stencilP_G32 =
c8_1 * (- inP_G3[kxnynz_kynz + 2] + inP_G3[kxnynz_kynz + 3]) +
c8_2 * (- inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 4]) +
c8_3 * (- inP_G3[kxnynz_kynz + 0] + inP_G3[kxnynz_kynz + 5]) +
c8_4 * (+ inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 6]);
const Type stencilM_G12 =
c8_1 * (- inM_G1[(kx+0) * nynz + kynz + 2] + inM_G1[(kx+1) * nynz + kynz + 2]) +
c8_2 * (- inM_G1[(kx-1) * nynz + kynz + 2] + inM_G1[(kx+2) * nynz + kynz + 2]) +
c8_3 * (- inM_G1[(kx-2) * nynz + kynz + 2] + inM_G1[(kx+3) * nynz + kynz + 2]) +
c8_4 * (- inM_G1[(kx-3) * nynz + kynz + 2] + inM_G1[(kx+4) * nynz + kynz + 2]);
const Type stencilM_G22 =
c8_1 * (- inM_G2[kxnynz + (ky+0) * nz + 2] + inM_G2[kxnynz + (ky+1) * nz + 2]) +
c8_2 * (- inM_G2[kxnynz + (ky-1) * nz + 2] + inM_G2[kxnynz + (ky+2) * nz + 2]) +
c8_3 * (- inM_G2[kxnynz + (ky-2) * nz + 2] + inM_G2[kxnynz + (ky+3) * nz + 2]) +
c8_4 * (- inM_G2[kxnynz + (ky-3) * nz + 2] + inM_G2[kxnynz + (ky+4) * nz + 2]);
const Type stencilM_G32 =
c8_1 * (- inM_G3[kxnynz_kynz + 2] + inM_G3[kxnynz_kynz + 3]) +
c8_2 * (- inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 4]) +
c8_3 * (- inM_G3[kxnynz_kynz + 0] + inM_G3[kxnynz_kynz + 5]) +
c8_4 * (+ inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 6]);
const Type dpx = invDx * stencilP_G12;
const Type dpy = invDy * stencilP_G22;
const Type dpz = invDz * stencilP_G32;
const Type dmx = invDx * stencilM_G12;
const Type dmy = invDy * stencilM_G22;
const Type dmz = invDz * stencilM_G32;
const long k = kxnynz_kynz + 2;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
const Type SA2 = sqrt(1 - A * A);
const float cosThetaCosPhi = cosTheta[k] * cosPhi[k];
const float cosThetaSinPhi = cosTheta[k] * sinPhi[k];
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz;
Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy;
Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz;
Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz;
Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy;
Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz;
// combine terms for application of adjoint g3
outP_G1[k] = B * E * dPg1;
outP_G2[k] = B * E * dPg2;
outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3;
outM_G1[k] = B * (1 - F) * dMg1;
outM_G2[k] = B * (1 - F) * dMg2;
outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3;
}
// kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative
{
const Type stencilP_G13 =
c8_1 * (- inP_G1[(kx+0) * nynz + kynz + 3] + inP_G1[(kx+1) * nynz + kynz + 3]) +
c8_2 * (- inP_G1[(kx-1) * nynz + kynz + 3] + inP_G1[(kx+2) * nynz + kynz + 3]) +
c8_3 * (- inP_G1[(kx-2) * nynz + kynz + 3] + inP_G1[(kx+3) * nynz + kynz + 3]) +
c8_4 * (- inP_G1[(kx-3) * nynz + kynz + 3] + inP_G1[(kx+4) * nynz + kynz + 3]);
const Type stencilP_G23 =
c8_1 * (- inP_G2[kxnynz + (ky+0) * nz + 3] + inP_G2[kxnynz + (ky+1) * nz + 3]) +
c8_2 * (- inP_G2[kxnynz + (ky-1) * nz + 3] + inP_G2[kxnynz + (ky+2) * nz + 3]) +
c8_3 * (- inP_G2[kxnynz + (ky-2) * nz + 3] + inP_G2[kxnynz + (ky+3) * nz + 3]) +
c8_4 * (- inP_G2[kxnynz + (ky-3) * nz + 3] + inP_G2[kxnynz + (ky+4) * nz + 3]);
const Type stencilP_G33 =
c8_1 * (- inP_G3[kxnynz_kynz + 3] + inP_G3[kxnynz_kynz + 4]) +
c8_2 * (- inP_G3[kxnynz_kynz + 2] + inP_G3[kxnynz_kynz + 5]) +
c8_3 * (- inP_G3[kxnynz_kynz + 1] + inP_G3[kxnynz_kynz + 6]) +
c8_4 * (- inP_G3[kxnynz_kynz + 0] + inP_G3[kxnynz_kynz + 7]);
const Type stencilM_G13 =
c8_1 * (- inM_G1[(kx+0) * nynz + kynz + 3] + inM_G1[(kx+1) * nynz + kynz + 3]) +
c8_2 * (- inM_G1[(kx-1) * nynz + kynz + 3] + inM_G1[(kx+2) * nynz + kynz + 3]) +
c8_3 * (- inM_G1[(kx-2) * nynz + kynz + 3] + inM_G1[(kx+3) * nynz + kynz + 3]) +
c8_4 * (- inM_G1[(kx-3) * nynz + kynz + 3] + inM_G1[(kx+4) * nynz + kynz + 3]);
const Type stencilM_G23 =
c8_1 * (- inM_G2[kxnynz + (ky+0) * nz + 3] + inM_G2[kxnynz + (ky+1) * nz + 3]) +
c8_2 * (- inM_G2[kxnynz + (ky-1) * nz + 3] + inM_G2[kxnynz + (ky+2) * nz + 3]) +
c8_3 * (- inM_G2[kxnynz + (ky-2) * nz + 3] + inM_G2[kxnynz + (ky+3) * nz + 3]) +
c8_4 * (- inM_G2[kxnynz + (ky-3) * nz + 3] + inM_G2[kxnynz + (ky+4) * nz + 3]);
const Type stencilM_G33 =
c8_1 * (- inM_G3[kxnynz_kynz + 3] + inM_G3[kxnynz_kynz + 4]) +
c8_2 * (- inM_G3[kxnynz_kynz + 2] + inM_G3[kxnynz_kynz + 5]) +
c8_3 * (- inM_G3[kxnynz_kynz + 1] + inM_G3[kxnynz_kynz + 6]) +
c8_4 * (- inM_G3[kxnynz_kynz + 0] + inM_G3[kxnynz_kynz + 7]);
const Type dpx = invDx * stencilP_G13;
const Type dpy = invDy * stencilP_G23;
const Type dpz = invDz * stencilP_G33;
const Type dmx = invDx * stencilM_G13;
const Type dmy = invDy * stencilM_G23;
const Type dmz = invDz * stencilM_G33;
const long k = kxnynz_kynz + 3;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
const Type SA2 = sqrt(1 - A * A);
const float cosThetaCosPhi = cosTheta[k] * cosPhi[k];
const float cosThetaSinPhi = cosTheta[k] * sinPhi[k];
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
Type dPg1 = cosThetaCosPhi * dpx + cosThetaSinPhi * dpy - sinTheta[k] * dpz;
Type dPg2 = - sinPhi[k] * dpx + cosPhi[k] * dpy;
Type dPg3 = sinThetaCosPhi * dpx + sinTheta[k] * sinPhi[k] * dpy + cosTheta[k] * dpz;
Type dMg1 = cosThetaCosPhi * dmx + cosThetaSinPhi * dmy - sinTheta[k] * dmz;
Type dMg2 = - sinPhi[k] * dmx + cosPhi[k] * dmy;
Type dMg3 = sinThetaCosPhi * dmx + sinTheta[k] * sinPhi[k] * dmy + cosTheta[k] * dmz;
// combine terms for application of adjoint g3
outP_G1[k] = B * E * dPg1;
outP_G2[k] = B * E * dPg2;
outP_G3[k] = B * (1 - F * A * A) * dPg3 + B * F * A * SA2 * dMg3;
outM_G1[k] = B * (1 - F) * dMg1;
outM_G2[k] = B * (1 - F) * dMg2;
outM_G3[k] = B * F * A * SA2 * dPg3 + B * (1 - F + F * A * A) * dMg3;
}
}
}
}
}
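// applyFirstDerivatives3D_TTI_MinusHalf_TimeUpdate_Nonlinear:
// applies the adjoint (minus-half-cell) rotated first derivatives to the
// sandwiched gradient components and fuses the divergence with the leapfrog
// time update of pOld/mOld, including the absorbing-boundary Q damping.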
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives3D_TTI_MinusHalf_TimeUpdate_Nonlinear(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDy,
const Type invDz,
const Type dt,
Type * __restrict__ inP_G1,
Type * __restrict__ inP_G2,
Type * __restrict__ inP_G3,
Type * __restrict__ inM_G1,
Type * __restrict__ inM_G2,
Type * __restrict__ inM_G3,
Type * __restrict__ fieldVel,
Type * __restrict__ fieldBuoy,
Type * __restrict__ dtOmegaInvQ,
float * __restrict__ sinTheta,
float * __restrict__ cosTheta,
float * __restrict__ sinPhi,
float * __restrict__ cosPhi,
Type * __restrict__ pCur,
Type * __restrict__ mCur,
Type * __restrict__ pSpace,
Type * __restrict__ mSpace,
Type * __restrict__ pOld,
Type * __restrict__ mOld,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
const long nynz = ny * nz;
const Type dt2 = dt * dt;
// zero the output arrays: only the 4-point-wide annulus inside the absorbing boundary needs to be zeroed
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long kindex1 = kx * ny * nz + ky * nz + k;
const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
pSpace[kindex1] = pSpace[kindex2] = 0;
mSpace[kindex1] = mSpace[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = kx * ny * nz + k * nz + kz;
const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
pSpace[kindex1] = pSpace[kindex2] = 0;
mSpace[kindex1] = mSpace[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = k * ny * nz + ky * nz + kz;
const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
pSpace[kindex1] = pSpace[kindex2] = 0;
mSpace[kindex1] = mSpace[kindex2] = 0;
}
}
}
// interior
// TODO -- this does significantly more compute than John's latest implementation.
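// Unlike the PlusHalf kernels, the rotation angles here vary across the
// stencil footprint, so every tap is weighted by its own sin/cos sample --
// that per-tap weighting is the extra arithmetic noted in the TODO above.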
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
const long kxnynz = kx * nynz;
for (long ky = by; ky < kymax; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long kynz_kz = kynz + kz;
const long kxm4 = (kx-4) * nynz + kynz_kz;
const long kxm3 = (kx-3) * nynz + kynz_kz;
const long kxm2 = (kx-2) * nynz + kynz_kz;
const long kxm1 = (kx-1) * nynz + kynz_kz;
const long kxp0 = (kx+0) * nynz + kynz_kz;
const long kxp1 = (kx+1) * nynz + kynz_kz;
const long kxp2 = (kx+2) * nynz + kynz_kz;
const long kxp3 = (kx+3) * nynz + kynz_kz;
const long kym4 = kxnynz + (ky-4) * nz + kz;
const long kym3 = kxnynz + (ky-3) * nz + kz;
const long kym2 = kxnynz + (ky-2) * nz + kz;
const long kym1 = kxnynz + (ky-1) * nz + kz;
const long kyp0 = kxnynz + (ky+0) * nz + kz;
const long kyp1 = kxnynz + (ky+1) * nz + kz;
const long kyp2 = kxnynz + (ky+2) * nz + kz;
const long kyp3 = kxnynz + (ky+3) * nz + kz;
const long kzm4 = kxnynz_kynz + (kz-4);
const long kzm3 = kxnynz_kynz + (kz-3);
const long kzm2 = kxnynz_kynz + (kz-2);
const long kzm1 = kxnynz_kynz + (kz-1);
const long kzp0 = kxnynz_kynz + (kz+0);
const long kzp1 = kxnynz_kynz + (kz+1);
const long kzp2 = kxnynz_kynz + (kz+2);
const long kzp3 = kxnynz_kynz + (kz+3);
// ........................ G1 ........................
const Type stencilP_G1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inP_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inP_G1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inP_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inP_G1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inP_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inP_G1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inP_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inP_G1[kxp3]);
const Type stencilP_G1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inP_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inP_G1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inP_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inP_G1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inP_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inP_G1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inP_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inP_G1[kyp3]);
const Type stencilP_G1C =
c8_1 * (- sinTheta[kzm1] * inP_G1[kzm1] + sinTheta[kzp0] * inP_G1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inP_G1[kzm2] + sinTheta[kzp1] * inP_G1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inP_G1[kzm3] + sinTheta[kzp2] * inP_G1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inP_G1[kzm4] + sinTheta[kzp3] * inP_G1[kzp3]);
// ........................ G2 ........................
const Type stencilP_G2A =
c8_1 * (- sinPhi[kxm1] * inP_G2[kxm1] + sinPhi[kxp0] * inP_G2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inP_G2[kxm2] + sinPhi[kxp1] * inP_G2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inP_G2[kxm3] + sinPhi[kxp2] * inP_G2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inP_G2[kxm4] + sinPhi[kxp3] * inP_G2[kxp3]);
const Type stencilP_G2B =
c8_1 * (- cosPhi[kym1] * inP_G2[kym1] + cosPhi[kyp0] * inP_G2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inP_G2[kym2] + cosPhi[kyp1] * inP_G2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inP_G2[kym3] + cosPhi[kyp2] * inP_G2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inP_G2[kym4] + cosPhi[kyp3] * inP_G2[kyp3]);
// ........................ G3 ........................
const Type stencilP_G3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inP_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inP_G3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inP_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inP_G3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inP_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inP_G3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inP_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inP_G3[kxp3]);
const Type stencilP_G3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inP_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inP_G3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inP_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inP_G3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inP_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inP_G3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inP_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inP_G3[kyp3]);
const Type stencilP_G3C =
c8_1 * (- cosTheta[kzm1] * inP_G3[kzm1] + cosTheta[kzp0] * inP_G3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inP_G3[kzm2] + cosTheta[kzp1] * inP_G3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inP_G3[kzm3] + cosTheta[kzp2] * inP_G3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inP_G3[kzm4] + cosTheta[kzp3] * inP_G3[kzp3]);
// ........................ G1 ........................
const Type stencilM_G1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inM_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inM_G1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inM_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inM_G1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inM_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inM_G1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inM_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inM_G1[kxp3]);
const Type stencilM_G1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inM_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inM_G1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inM_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inM_G1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inM_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inM_G1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inM_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inM_G1[kyp3]);
const Type stencilM_G1C =
c8_1 * (- sinTheta[kzm1] * inM_G1[kzm1] + sinTheta[kzp0] * inM_G1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inM_G1[kzm2] + sinTheta[kzp1] * inM_G1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inM_G1[kzm3] + sinTheta[kzp2] * inM_G1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inM_G1[kzm4] + sinTheta[kzp3] * inM_G1[kzp3]);
// ........................ G2 ........................
const Type stencilM_G2A =
c8_1 * (- sinPhi[kxm1] * inM_G2[kxm1] + sinPhi[kxp0] * inM_G2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inM_G2[kxm2] + sinPhi[kxp1] * inM_G2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inM_G2[kxm3] + sinPhi[kxp2] * inM_G2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inM_G2[kxm4] + sinPhi[kxp3] * inM_G2[kxp3]);
const Type stencilM_G2B =
c8_1 * (- cosPhi[kym1] * inM_G2[kym1] + cosPhi[kyp0] * inM_G2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inM_G2[kym2] + cosPhi[kyp1] * inM_G2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inM_G2[kym3] + cosPhi[kyp2] * inM_G2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inM_G2[kym4] + cosPhi[kyp3] * inM_G2[kyp3]);
// ........................ G3 ........................
const Type stencilM_G3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inM_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inM_G3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inM_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inM_G3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inM_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inM_G3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inM_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inM_G3[kxp3]);
const Type stencilM_G3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inM_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inM_G3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inM_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inM_G3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inM_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inM_G3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inM_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inM_G3[kyp3]);
const Type stencilM_G3C =
c8_1 * (- cosTheta[kzm1] * inM_G3[kzm1] + cosTheta[kzp0] * inM_G3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inM_G3[kzm2] + cosTheta[kzp1] * inM_G3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inM_G3[kzm3] + cosTheta[kzp2] * inM_G3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inM_G3[kzm4] + cosTheta[kzp3] * inM_G3[kzp3]);
const long k = kxnynz_kynz + kz;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
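// adjoint of the forward rotation (transpose of R(theta, phi)): each rotated
// component is scattered back onto the Cartesian axes, hence the sign flips
// on the G1 z-term and the G2 x-term below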
const Type dpg1 = invDx * stencilP_G1A + invDy * stencilP_G1B - invDz * stencilP_G1C;
const Type dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B;
const Type dpg3 = invDx * stencilP_G3A + invDy * stencilP_G3B + invDz * stencilP_G3C;
const Type dmg1 = invDx * stencilM_G1A + invDy * stencilM_G1B - invDz * stencilM_G1C;
const Type dmg2 = - invDx * stencilM_G2A + invDy * stencilM_G2B;
const Type dmg3 = invDx * stencilM_G3A + invDy * stencilM_G3B + invDz * stencilM_G3C;
pSpace[k] = dpg1 + dpg2 + dpg3;
mSpace[k] = dmg1 + dmg2 + dmg3;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
}
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 4; ky < ny4; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
// kz = 0 -- at the free surface -- p = 0, dp = 0
{
const Type dpg1 = 0;
const Type dpg2 = 0;
const Type dpg3 = 0;
const Type dmg1 = 0;
const Type dmg2 = 0;
const Type dmg3 = 0;
const long k = kxnynz_kynz + 0;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dpg1 + dpg2 + dpg3;
mSpace[k] = dmg1 + dmg2 + dmg3;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 1 -- one cell below the free surface
{
const long kz = 1;
const long kynz_kz = kynz + kz;
const long kxm4 = (kx-4) * nynz + kynz_kz;
const long kxm3 = (kx-3) * nynz + kynz_kz;
const long kxm2 = (kx-2) * nynz + kynz_kz;
const long kxm1 = (kx-1) * nynz + kynz_kz;
const long kxp0 = (kx+0) * nynz + kynz_kz;
const long kxp1 = (kx+1) * nynz + kynz_kz;
const long kxp2 = (kx+2) * nynz + kynz_kz;
const long kxp3 = (kx+3) * nynz + kynz_kz;
const long kym4 = kxnynz + (ky-4) * nz + kz;
const long kym3 = kxnynz + (ky-3) * nz + kz;
const long kym2 = kxnynz + (ky-2) * nz + kz;
const long kym1 = kxnynz + (ky-1) * nz + kz;
const long kyp0 = kxnynz + (ky+0) * nz + kz;
const long kyp1 = kxnynz + (ky+1) * nz + kz;
const long kyp2 = kxnynz + (ky+2) * nz + kz;
const long kyp3 = kxnynz + (ky+3) * nz + kz;
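// Z taps that fall above the free surface are reflected back into the grid
// (ghost index -n maps to n-1), giving the mirrored kzm* indices below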
const long kzm4 = kxnynz_kynz + 2;
const long kzm3 = kxnynz_kynz + 1;
const long kzm2 = kxnynz_kynz + 0;
const long kzm1 = kxnynz_kynz + 0;
const long kzp0 = kxnynz_kynz + 1;
const long kzp1 = kxnynz_kynz + 2;
const long kzp2 = kxnynz_kynz + 3;
const long kzp3 = kxnynz_kynz + 4;
// ........................ G1 ........................
const Type stencilP_G1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inP_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inP_G1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inP_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inP_G1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inP_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inP_G1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inP_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inP_G1[kxp3]);
const Type stencilP_G1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inP_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inP_G1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inP_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inP_G1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inP_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inP_G1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inP_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inP_G1[kyp3]);
const Type stencilP_G1C =
c8_1 * (- sinTheta[kzm1] * inP_G1[kzm1] + sinTheta[kzp0] * inP_G1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inP_G1[kzm2] + sinTheta[kzp1] * inP_G1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inP_G1[kzm3] + sinTheta[kzp2] * inP_G1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inP_G1[kzm4] + sinTheta[kzp3] * inP_G1[kzp3]);
// ........................ G2 ........................
const Type stencilP_G2A =
c8_1 * (- sinPhi[kxm1] * inP_G2[kxm1] + sinPhi[kxp0] * inP_G2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inP_G2[kxm2] + sinPhi[kxp1] * inP_G2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inP_G2[kxm3] + sinPhi[kxp2] * inP_G2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inP_G2[kxm4] + sinPhi[kxp3] * inP_G2[kxp3]);
const Type stencilP_G2B =
c8_1 * (- cosPhi[kym1] * inP_G2[kym1] + cosPhi[kyp0] * inP_G2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inP_G2[kym2] + cosPhi[kyp1] * inP_G2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inP_G2[kym3] + cosPhi[kyp2] * inP_G2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inP_G2[kym4] + cosPhi[kyp3] * inP_G2[kyp3]);
// ........................ G3 ........................
const Type stencilP_G3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inP_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inP_G3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inP_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inP_G3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inP_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inP_G3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inP_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inP_G3[kxp3]);
const Type stencilP_G3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inP_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inP_G3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inP_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inP_G3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inP_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inP_G3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inP_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inP_G3[kyp3]);
const Type stencilP_G3C =
c8_1 * (- cosTheta[kzm1] * inP_G3[kzm1] + cosTheta[kzp0] * inP_G3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inP_G3[kzm2] + cosTheta[kzp1] * inP_G3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inP_G3[kzm3] + cosTheta[kzp2] * inP_G3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inP_G3[kzm4] + cosTheta[kzp3] * inP_G3[kzp3]);
// ........................ G1 ........................
const Type stencilM_G1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inM_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inM_G1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inM_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inM_G1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inM_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inM_G1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inM_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inM_G1[kxp3]);
const Type stencilM_G1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inM_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inM_G1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inM_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inM_G1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inM_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inM_G1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inM_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inM_G1[kyp3]);
const Type stencilM_G1C =
c8_1 * (- sinTheta[kzm1] * inM_G1[kzm1] + sinTheta[kzp0] * inM_G1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inM_G1[kzm2] + sinTheta[kzp1] * inM_G1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inM_G1[kzm3] + sinTheta[kzp2] * inM_G1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inM_G1[kzm4] + sinTheta[kzp3] * inM_G1[kzp3]);
// ........................ G2 ........................
const Type stencilM_G2A =
c8_1 * (- sinPhi[kxm1] * inM_G2[kxm1] + sinPhi[kxp0] * inM_G2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inM_G2[kxm2] + sinPhi[kxp1] * inM_G2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inM_G2[kxm3] + sinPhi[kxp2] * inM_G2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inM_G2[kxm4] + sinPhi[kxp3] * inM_G2[kxp3]);
const Type stencilM_G2B =
c8_1 * (- cosPhi[kym1] * inM_G2[kym1] + cosPhi[kyp0] * inM_G2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inM_G2[kym2] + cosPhi[kyp1] * inM_G2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inM_G2[kym3] + cosPhi[kyp2] * inM_G2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inM_G2[kym4] + cosPhi[kyp3] * inM_G2[kyp3]);
// ........................ G3 ........................
const Type stencilM_G3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inM_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inM_G3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inM_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inM_G3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inM_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inM_G3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inM_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inM_G3[kxp3]);
const Type stencilM_G3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inM_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inM_G3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inM_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inM_G3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inM_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inM_G3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inM_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inM_G3[kyp3]);
const Type stencilM_G3C =
c8_1 * (- cosTheta[kzm1] * inM_G3[kzm1] + cosTheta[kzp0] * inM_G3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inM_G3[kzm2] + cosTheta[kzp1] * inM_G3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inM_G3[kzm3] + cosTheta[kzp2] * inM_G3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inM_G3[kzm4] + cosTheta[kzp3] * inM_G3[kzp3]);
const long k = kxnynz_kynz + 1;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
const Type dpg1 = invDx * stencilP_G1A + invDy * stencilP_G1B - invDz * stencilP_G1C;
const Type dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B;
const Type dpg3 = invDx * stencilP_G3A + invDy * stencilP_G3B + invDz * stencilP_G3C;
const Type dmg1 = invDx * stencilM_G1A + invDy * stencilM_G1B - invDz * stencilM_G1C;
const Type dmg2 = - invDx * stencilM_G2A + invDy * stencilM_G2B;
const Type dmg3 = invDx * stencilM_G3A + invDy * stencilM_G3B + invDz * stencilM_G3C;
pSpace[k] = dpg1 + dpg2 + dpg3;
mSpace[k] = dmg1 + dmg2 + dmg3;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 2 -- two cells below the free surface
{
const long kz = 2;
const long kynz_kz = kynz + kz;
const long kxm4 = (kx-4) * nynz + kynz_kz;
const long kxm3 = (kx-3) * nynz + kynz_kz;
const long kxm2 = (kx-2) * nynz + kynz_kz;
const long kxm1 = (kx-1) * nynz + kynz_kz;
const long kxp0 = (kx+0) * nynz + kynz_kz;
const long kxp1 = (kx+1) * nynz + kynz_kz;
const long kxp2 = (kx+2) * nynz + kynz_kz;
const long kxp3 = (kx+3) * nynz + kynz_kz;
const long kym4 = kxnynz + (ky-4) * nz + kz;
const long kym3 = kxnynz + (ky-3) * nz + kz;
const long kym2 = kxnynz + (ky-2) * nz + kz;
const long kym1 = kxnynz + (ky-1) * nz + kz;
const long kyp0 = kxnynz + (ky+0) * nz + kz;
const long kyp1 = kxnynz + (ky+1) * nz + kz;
const long kyp2 = kxnynz + (ky+2) * nz + kz;
const long kyp3 = kxnynz + (ky+3) * nz + kz;
const long kzm4 = kxnynz_kynz + 1;
const long kzm3 = kxnynz_kynz + 0;
const long kzm2 = kxnynz_kynz + 0;
const long kzm1 = kxnynz_kynz + 1;
const long kzp0 = kxnynz_kynz + 2;
const long kzp1 = kxnynz_kynz + 3;
const long kzp2 = kxnynz_kynz + 4;
const long kzp3 = kxnynz_kynz + 5;
// ........................ G1 ........................
const Type stencilP_G1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inP_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inP_G1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inP_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inP_G1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inP_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inP_G1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inP_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inP_G1[kxp3]);
const Type stencilP_G1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inP_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inP_G1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inP_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inP_G1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inP_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inP_G1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inP_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inP_G1[kyp3]);
const Type stencilP_G1C =
c8_1 * (- sinTheta[kzm1] * inP_G1[kzm1] + sinTheta[kzp0] * inP_G1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inP_G1[kzm2] + sinTheta[kzp1] * inP_G1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inP_G1[kzm3] + sinTheta[kzp2] * inP_G1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inP_G1[kzm4] + sinTheta[kzp3] * inP_G1[kzp3]);
// ........................ G2 ........................
const Type stencilP_G2A =
c8_1 * (- sinPhi[kxm1] * inP_G2[kxm1] + sinPhi[kxp0] * inP_G2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inP_G2[kxm2] + sinPhi[kxp1] * inP_G2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inP_G2[kxm3] + sinPhi[kxp2] * inP_G2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inP_G2[kxm4] + sinPhi[kxp3] * inP_G2[kxp3]);
const Type stencilP_G2B =
c8_1 * (- cosPhi[kym1] * inP_G2[kym1] + cosPhi[kyp0] * inP_G2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inP_G2[kym2] + cosPhi[kyp1] * inP_G2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inP_G2[kym3] + cosPhi[kyp2] * inP_G2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inP_G2[kym4] + cosPhi[kyp3] * inP_G2[kyp3]);
// ........................ G3 ........................
const Type stencilP_G3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inP_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inP_G3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inP_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inP_G3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inP_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inP_G3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inP_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inP_G3[kxp3]);
const Type stencilP_G3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inP_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inP_G3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inP_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inP_G3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inP_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inP_G3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inP_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inP_G3[kyp3]);
const Type stencilP_G3C =
c8_1 * (- cosTheta[kzm1] * inP_G3[kzm1] + cosTheta[kzp0] * inP_G3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inP_G3[kzm2] + cosTheta[kzp1] * inP_G3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inP_G3[kzm3] + cosTheta[kzp2] * inP_G3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inP_G3[kzm4] + cosTheta[kzp3] * inP_G3[kzp3]);
// ........................ G1 ........................
const Type stencilM_G1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inM_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inM_G1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inM_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inM_G1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inM_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inM_G1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inM_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inM_G1[kxp3]);
const Type stencilM_G1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inM_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inM_G1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inM_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inM_G1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inM_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inM_G1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inM_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inM_G1[kyp3]);
const Type stencilM_G1C =
c8_1 * (- sinTheta[kzm1] * inM_G1[kzm1] + sinTheta[kzp0] * inM_G1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inM_G1[kzm2] + sinTheta[kzp1] * inM_G1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inM_G1[kzm3] + sinTheta[kzp2] * inM_G1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inM_G1[kzm4] + sinTheta[kzp3] * inM_G1[kzp3]);
// ........................ G2 ........................
const Type stencilM_G2A =
c8_1 * (- sinPhi[kxm1] * inM_G2[kxm1] + sinPhi[kxp0] * inM_G2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inM_G2[kxm2] + sinPhi[kxp1] * inM_G2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inM_G2[kxm3] + sinPhi[kxp2] * inM_G2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inM_G2[kxm4] + sinPhi[kxp3] * inM_G2[kxp3]);
const Type stencilM_G2B =
c8_1 * (- cosPhi[kym1] * inM_G2[kym1] + cosPhi[kyp0] * inM_G2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inM_G2[kym2] + cosPhi[kyp1] * inM_G2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inM_G2[kym3] + cosPhi[kyp2] * inM_G2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inM_G2[kym4] + cosPhi[kyp3] * inM_G2[kyp3]);
// ........................ G3 ........................
const Type stencilM_G3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inM_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inM_G3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inM_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inM_G3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inM_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inM_G3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inM_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inM_G3[kxp3]);
const Type stencilM_G3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inM_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inM_G3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inM_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inM_G3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inM_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inM_G3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inM_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inM_G3[kyp3]);
const Type stencilM_G3C =
c8_1 * (- cosTheta[kzm1] * inM_G3[kzm1] + cosTheta[kzp0] * inM_G3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inM_G3[kzm2] + cosTheta[kzp1] * inM_G3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inM_G3[kzm3] + cosTheta[kzp2] * inM_G3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inM_G3[kzm4] + cosTheta[kzp3] * inM_G3[kzp3]);
const long k = kxnynz_kynz + 2;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
const Type dpg1 = invDx * stencilP_G1A + invDy * stencilP_G1B - invDz * stencilP_G1C;
const Type dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B;
const Type dpg3 = invDx * stencilP_G3A + invDy * stencilP_G3B + invDz * stencilP_G3C;
const Type dmg1 = invDx * stencilM_G1A + invDy * stencilM_G1B - invDz * stencilM_G1C;
const Type dmg2 = - invDx * stencilM_G2A + invDy * stencilM_G2B;
const Type dmg3 = invDx * stencilM_G3A + invDy * stencilM_G3B + invDz * stencilM_G3C;
pSpace[k] = dpg1 + dpg2 + dpg3;
mSpace[k] = dmg1 + dmg2 + dmg3;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 3 -- three cells below the free surface
{
const long kz = 3;
const long kynz_kz = kynz + kz;
const long kxm4 = (kx-4) * nynz + kynz_kz;
const long kxm3 = (kx-3) * nynz + kynz_kz;
const long kxm2 = (kx-2) * nynz + kynz_kz;
const long kxm1 = (kx-1) * nynz + kynz_kz;
const long kxp0 = (kx+0) * nynz + kynz_kz;
const long kxp1 = (kx+1) * nynz + kynz_kz;
const long kxp2 = (kx+2) * nynz + kynz_kz;
const long kxp3 = (kx+3) * nynz + kynz_kz;
const long kym4 = kxnynz + (ky-4) * nz + kz;
const long kym3 = kxnynz + (ky-3) * nz + kz;
const long kym2 = kxnynz + (ky-2) * nz + kz;
const long kym1 = kxnynz + (ky-1) * nz + kz;
const long kyp0 = kxnynz + (ky+0) * nz + kz;
const long kyp1 = kxnynz + (ky+1) * nz + kz;
const long kyp2 = kxnynz + (ky+2) * nz + kz;
const long kyp3 = kxnynz + (ky+3) * nz + kz;
const long kzm4 = kxnynz_kynz + 0;
const long kzm3 = kxnynz_kynz + 0;
const long kzm2 = kxnynz_kynz + 1;
const long kzm1 = kxnynz_kynz + 2;
const long kzp0 = kxnynz_kynz + 3;
const long kzp1 = kxnynz_kynz + 4;
const long kzp2 = kxnynz_kynz + 5;
const long kzp3 = kxnynz_kynz + 6;
// ........................ G1 ........................
const Type stencilP_G1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inP_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inP_G1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inP_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inP_G1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inP_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inP_G1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inP_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inP_G1[kxp3]);
const Type stencilP_G1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inP_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inP_G1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inP_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inP_G1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inP_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inP_G1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inP_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inP_G1[kyp3]);
const Type stencilP_G1C =
c8_1 * (- sinTheta[kzm1] * inP_G1[kzm1] + sinTheta[kzp0] * inP_G1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inP_G1[kzm2] + sinTheta[kzp1] * inP_G1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inP_G1[kzm3] + sinTheta[kzp2] * inP_G1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inP_G1[kzm4] + sinTheta[kzp3] * inP_G1[kzp3]);
// ........................ G2 ........................
const Type stencilP_G2A =
c8_1 * (- sinPhi[kxm1] * inP_G2[kxm1] + sinPhi[kxp0] * inP_G2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inP_G2[kxm2] + sinPhi[kxp1] * inP_G2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inP_G2[kxm3] + sinPhi[kxp2] * inP_G2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inP_G2[kxm4] + sinPhi[kxp3] * inP_G2[kxp3]);
const Type stencilP_G2B =
c8_1 * (- cosPhi[kym1] * inP_G2[kym1] + cosPhi[kyp0] * inP_G2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inP_G2[kym2] + cosPhi[kyp1] * inP_G2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inP_G2[kym3] + cosPhi[kyp2] * inP_G2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inP_G2[kym4] + cosPhi[kyp3] * inP_G2[kyp3]);
// ........................ G3 ........................
const Type stencilP_G3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inP_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inP_G3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inP_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inP_G3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inP_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inP_G3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inP_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inP_G3[kxp3]);
const Type stencilP_G3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inP_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inP_G3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inP_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inP_G3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inP_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inP_G3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inP_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inP_G3[kyp3]);
const Type stencilP_G3C =
c8_1 * (- cosTheta[kzm1] * inP_G3[kzm1] + cosTheta[kzp0] * inP_G3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inP_G3[kzm2] + cosTheta[kzp1] * inP_G3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inP_G3[kzm3] + cosTheta[kzp2] * inP_G3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inP_G3[kzm4] + cosTheta[kzp3] * inP_G3[kzp3]);
// ........................ G1 ........................
const Type stencilM_G1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inM_G1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inM_G1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inM_G1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inM_G1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inM_G1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inM_G1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inM_G1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inM_G1[kxp3]);
const Type stencilM_G1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inM_G1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inM_G1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inM_G1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inM_G1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inM_G1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inM_G1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inM_G1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inM_G1[kyp3]);
const Type stencilM_G1C =
c8_1 * (- sinTheta[kzm1] * inM_G1[kzm1] + sinTheta[kzp0] * inM_G1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inM_G1[kzm2] + sinTheta[kzp1] * inM_G1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inM_G1[kzm3] + sinTheta[kzp2] * inM_G1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inM_G1[kzm4] + sinTheta[kzp3] * inM_G1[kzp3]);
// ........................ G2 ........................
const Type stencilM_G2A =
c8_1 * (- sinPhi[kxm1] * inM_G2[kxm1] + sinPhi[kxp0] * inM_G2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inM_G2[kxm2] + sinPhi[kxp1] * inM_G2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inM_G2[kxm3] + sinPhi[kxp2] * inM_G2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inM_G2[kxm4] + sinPhi[kxp3] * inM_G2[kxp3]);
const Type stencilM_G2B =
c8_1 * (- cosPhi[kym1] * inM_G2[kym1] + cosPhi[kyp0] * inM_G2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inM_G2[kym2] + cosPhi[kyp1] * inM_G2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inM_G2[kym3] + cosPhi[kyp2] * inM_G2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inM_G2[kym4] + cosPhi[kyp3] * inM_G2[kyp3]);
// ........................ G3 ........................
const Type stencilM_G3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inM_G3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inM_G3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inM_G3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inM_G3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inM_G3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inM_G3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inM_G3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inM_G3[kxp3]);
const Type stencilM_G3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inM_G3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inM_G3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inM_G3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inM_G3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inM_G3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inM_G3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inM_G3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inM_G3[kyp3]);
const Type stencilM_G3C =
c8_1 * (- cosTheta[kzm1] * inM_G3[kzm1] + cosTheta[kzp0] * inM_G3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inM_G3[kzm2] + cosTheta[kzp1] * inM_G3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inM_G3[kzm3] + cosTheta[kzp2] * inM_G3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inM_G3[kzm4] + cosTheta[kzp3] * inM_G3[kzp3]);
const long k = kxnynz_kynz + 3;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
const Type dpg1 = invDx * stencilP_G1A + invDy * stencilP_G1B - invDz * stencilP_G1C;
const Type dpg2 = - invDx * stencilP_G2A + invDy * stencilP_G2B;
const Type dpg3 = invDx * stencilP_G3A + invDy * stencilP_G3B + invDz * stencilP_G3C;
const Type dmg1 = invDx * stencilM_G1A + invDy * stencilM_G1B - invDz * stencilM_G1C;
const Type dmg2 = - invDx * stencilM_G2A + invDy * stencilM_G2B;
const Type dmg3 = invDx * stencilM_G3A + invDy * stencilM_G3B + invDz * stencilM_G3C;
pSpace[k] = dpg1 + dpg2 + dpg3;
mSpace[k] = dmg1 + dmg2 + dmg3;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
}
}
}
}
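// applyFirstDerivatives3D_TTI_PlusHalf:
// plus-half-cell 8th-order x/y/z derivatives of (inG1, inG2, inG3), rotated
// into the tilted frame -- the same derivative and rotation as the Sandwich
// kernel above, but without the material-parameter scaling.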
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives3D_TTI_PlusHalf(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDy,
const Type invDz,
Type * __restrict__ inG1,
Type * __restrict__ inG2,
Type * __restrict__ inG3,
float * __restrict__ sinTheta,
float * __restrict__ cosTheta,
float * __restrict__ sinPhi,
float * __restrict__ cosPhi,
Type * __restrict__ outG1,
Type * __restrict__ outG2,
Type * __restrict__ outG3,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
const long nynz = ny * nz;
// zero the output arrays: only the 4-point-wide annulus inside the absorbing boundary needs to be zeroed
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
long kindex1 = kx * ny * nz + ky * nz + k;
long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
outG1[kindex1] = outG1[kindex2] = 0;
outG2[kindex1] = outG2[kindex2] = 0;
outG3[kindex1] = outG3[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
long kindex1 = kx * ny * nz + k * nz + kz;
long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
outG1[kindex1] = outG1[kindex2] = 0;
outG2[kindex1] = outG2[kindex2] = 0;
outG3[kindex1] = outG3[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
long kindex1 = k * ny * nz + ky * nz + kz;
long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
outG1[kindex1] = outG1[kindex2] = 0;
outG2[kindex1] = outG2[kindex2] = 0;
outG3[kindex1] = outG3[kindex2] = 0;
}
}
}
// interior
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
const long kxnynz = kx * nynz;
for (long ky = by; ky < kymax; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long kynz_kz = kynz + kz;
const Type stencilG1 =
c8_1 * (- inG1[(kx+0) * nynz + kynz_kz] + inG1[(kx+1) * nynz + kynz_kz]) +
c8_2 * (- inG1[(kx-1) * nynz + kynz_kz] + inG1[(kx+2) * nynz + kynz_kz]) +
c8_3 * (- inG1[(kx-2) * nynz + kynz_kz] + inG1[(kx+3) * nynz + kynz_kz]) +
c8_4 * (- inG1[(kx-3) * nynz + kynz_kz] + inG1[(kx+4) * nynz + kynz_kz]);
const Type stencilG2 =
c8_1 * (- inG2[kxnynz + (ky+0) * nz + kz] + inG2[kxnynz + (ky+1) * nz + kz]) +
c8_2 * (- inG2[kxnynz + (ky-1) * nz + kz] + inG2[kxnynz + (ky+2) * nz + kz]) +
c8_3 * (- inG2[kxnynz + (ky-2) * nz + kz] + inG2[kxnynz + (ky+3) * nz + kz]) +
c8_4 * (- inG2[kxnynz + (ky-3) * nz + kz] + inG2[kxnynz + (ky+4) * nz + kz]);
const Type stencilG3 =
c8_1 * (- inG3[kxnynz_kynz + (kz+0)] + inG3[kxnynz_kynz + (kz+1)]) +
c8_2 * (- inG3[kxnynz_kynz + (kz-1)] + inG3[kxnynz_kynz + (kz+2)]) +
c8_3 * (- inG3[kxnynz_kynz + (kz-2)] + inG3[kxnynz_kynz + (kz+3)]) +
c8_4 * (- inG3[kxnynz_kynz + (kz-3)] + inG3[kxnynz_kynz + (kz+4)]);
long k = kxnynz_kynz + kz;
const Type dx = invDx * stencilG1;
const Type dy = invDy * stencilG2;
const Type dz = invDz * stencilG3;
const float cosThetaCosPhi = cosTheta[k] * cosPhi[k];
const float cosThetaSinPhi = cosTheta[k] * sinPhi[k];
const float sinThetaCosPhi = sinTheta[k] * cosPhi[k];
outG1[k] = cosThetaCosPhi * dx + cosThetaSinPhi * dy - sinTheta[k] * dz;
outG2[k] = - sinPhi[k] * dx + cosPhi[k] * dy;
outG3[k] = sinThetaCosPhi * dx + sinTheta[k] * sinPhi[k] * dy + cosTheta[k] * dz;
}
}
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 4; ky < ny4; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
// kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative
// X and Y derivatives are identically zero
{
const Type stencilG30 =
c8_1 * (- inG3[kxnynz_kynz + 0] + inG3[kxnynz_kynz + 1]) +
c8_2 * (+ inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 2]) +
c8_3 * (+ inG3[kxnynz_kynz + 2] + inG3[kxnynz_kynz + 3]) +
c8_4 * (+ inG3[kxnynz_kynz + 3] + inG3[kxnynz_kynz + 4]);
const long k0 = kxnynz_kynz + 0;
const Type dz0 = invDz * stencilG30;
outG1[k0] = -sinTheta[k0] * dz0;
outG2[k0] = 0;
outG3[k0] = cosTheta[k0] * dz0;
}
// kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cell below for X/Y derivative
{
const Type stencilG11 =
c8_1 * (- inG1[(kx+0) * nynz + kynz + 1] + inG1[(kx+1) * nynz + kynz + 1]) +
c8_2 * (- inG1[(kx-1) * nynz + kynz + 1] + inG1[(kx+2) * nynz + kynz + 1]) +
c8_3 * (- inG1[(kx-2) * nynz + kynz + 1] + inG1[(kx+3) * nynz + kynz + 1]) +
c8_4 * (- inG1[(kx-3) * nynz + kynz + 1] + inG1[(kx+4) * nynz + kynz + 1]);
const Type stencilG21 =
c8_1 * (- inG2[kxnynz + (ky+0) * nz + 1] + inG2[kxnynz + (ky+1) * nz + 1]) +
c8_2 * (- inG2[kxnynz + (ky-1) * nz + 1] + inG2[kxnynz + (ky+2) * nz + 1]) +
c8_3 * (- inG2[kxnynz + (ky-2) * nz + 1] + inG2[kxnynz + (ky+3) * nz + 1]) +
c8_4 * (- inG2[kxnynz + (ky-3) * nz + 1] + inG2[kxnynz + (ky+4) * nz + 1]);
const Type stencilG31 =
c8_1 * (- inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 2]) +
c8_2 * (- inG3[kxnynz_kynz + 0] + inG3[kxnynz_kynz + 3]) +
c8_3 * (+ inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 4]) +
c8_4 * (+ inG3[kxnynz_kynz + 2] + inG3[kxnynz_kynz + 5]);
const long k1 = kxnynz_kynz + 1;
const Type dx1 = invDx * stencilG11;
const Type dy1 = invDy * stencilG21;
const Type dz1 = invDz * stencilG31;
outG1[k1] = cosTheta[k1] * cosPhi[k1] * dx1 + cosTheta[k1] * sinPhi[k1] * dy1 - sinTheta[k1] * dz1;
outG2[k1] = - sinPhi[k1] * dx1 + cosPhi[k1] * dy1;
outG3[k1] = sinTheta[k1] * cosPhi[k1] * dx1 + sinTheta[k1] * sinPhi[k1] * dy1 + cosTheta[k1] * dz1;
}
// kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative
{
const Type stencilG12 =
c8_1 * (- inG1[(kx+0) * nynz + kynz + 2] + inG1[(kx+1) * nynz + kynz + 2]) +
c8_2 * (- inG1[(kx-1) * nynz + kynz + 2] + inG1[(kx+2) * nynz + kynz + 2]) +
c8_3 * (- inG1[(kx-2) * nynz + kynz + 2] + inG1[(kx+3) * nynz + kynz + 2]) +
c8_4 * (- inG1[(kx-3) * nynz + kynz + 2] + inG1[(kx+4) * nynz + kynz + 2]);
const Type stencilG22 =
c8_1 * (- inG2[kxnynz + (ky+0) * nz + 2] + inG2[kxnynz + (ky+1) * nz + 2]) +
c8_2 * (- inG2[kxnynz + (ky-1) * nz + 2] + inG2[kxnynz + (ky+2) * nz + 2]) +
c8_3 * (- inG2[kxnynz + (ky-2) * nz + 2] + inG2[kxnynz + (ky+3) * nz + 2]) +
c8_4 * (- inG2[kxnynz + (ky-3) * nz + 2] + inG2[kxnynz + (ky+4) * nz + 2]);
const Type stencilG32 =
c8_1 * (- inG3[kxnynz_kynz + 2] + inG3[kxnynz_kynz + 3]) +
c8_2 * (- inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 4]) +
c8_3 * (- inG3[kxnynz_kynz + 0] + inG3[kxnynz_kynz + 5]) +
c8_4 * (+ inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 6]);
const long k2 = kxnynz_kynz + 2;
const Type dx2 = invDx * stencilG12;
const Type dy2 = invDy * stencilG22;
const Type dz2 = invDz * stencilG32;
outG1[k2] = cosTheta[k2] * cosPhi[k2] * dx2 + cosTheta[k2] * sinPhi[k2] * dy2 - sinTheta[k2] * dz2;
outG2[k2] = - sinPhi[k2] * dx2 + cosPhi[k2] * dy2;
outG3[k2] = sinTheta[k2] * cosPhi[k2] * dx2 + sinTheta[k2] * sinPhi[k2] * dy2 + cosTheta[k2] * dz2;
}
// kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative
{
const Type stencilG13 =
c8_1 * (- inG1[(kx+0) * nynz + kynz + 3] + inG1[(kx+1) * nynz + kynz + 3]) +
c8_2 * (- inG1[(kx-1) * nynz + kynz + 3] + inG1[(kx+2) * nynz + kynz + 3]) +
c8_3 * (- inG1[(kx-2) * nynz + kynz + 3] + inG1[(kx+3) * nynz + kynz + 3]) +
c8_4 * (- inG1[(kx-3) * nynz + kynz + 3] + inG1[(kx+4) * nynz + kynz + 3]);
const Type stencilG23 =
c8_1 * (- inG2[kxnynz + (ky+0) * nz + 3] + inG2[kxnynz + (ky+1) * nz + 3]) +
c8_2 * (- inG2[kxnynz + (ky-1) * nz + 3] + inG2[kxnynz + (ky+2) * nz + 3]) +
c8_3 * (- inG2[kxnynz + (ky-2) * nz + 3] + inG2[kxnynz + (ky+3) * nz + 3]) +
c8_4 * (- inG2[kxnynz + (ky-3) * nz + 3] + inG2[kxnynz + (ky+4) * nz + 3]);
const Type stencilG33 =
c8_1 * (- inG3[kxnynz_kynz + 3] + inG3[kxnynz_kynz + 4]) +
c8_2 * (- inG3[kxnynz_kynz + 2] + inG3[kxnynz_kynz + 5]) +
c8_3 * (- inG3[kxnynz_kynz + 1] + inG3[kxnynz_kynz + 6]) +
c8_4 * (- inG3[kxnynz_kynz + 0] + inG3[kxnynz_kynz + 7]);
const long k3 = kxnynz_kynz + 3;
const Type dx3 = invDx * stencilG13;
const Type dy3 = invDy * stencilG23;
const Type dz3 = invDz * stencilG33;
outG1[k3] = cosTheta[k3] * cosPhi[k3] * dx3 + cosTheta[k3] * sinPhi[k3] * dy3 - sinTheta[k3] * dz3;
outG2[k3] = - sinPhi[k3] * dx3 + cosPhi[k3] * dy3;
outG3[k3] = sinTheta[k3] * cosPhi[k3] * dx3 + sinTheta[k3] * sinPhi[k3] * dy3 + cosTheta[k3] * dz3;
}
}
}
}
}
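// applyFirstDerivatives3D_TTI_MinusHalf below is the adjoint of the
// PlusHalf operator above: the sin/cos factors multiply the inputs inside
// the difference stencils (rotation before differencing, rather than
// after), and the stencils are staggered half a cell the other way, so
// applying MinusHalf to PlusHalf-rotated gradients assembles the rotated
// divergence.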
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives3D_TTI_MinusHalf(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDy,
const Type invDz,
Type * __restrict__ inG1,
Type * __restrict__ inG2,
Type * __restrict__ inG3,
float * __restrict__ sinTheta,
float * __restrict__ cosTheta,
float * __restrict__ sinPhi,
float * __restrict__ cosPhi,
Type * __restrict__ outG1,
Type * __restrict__ outG2,
Type * __restrict__ outG3,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
const long nynz = ny * nz;
// zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long kindex1 = kx * ny * nz + ky * nz + k;
const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
outG1[kindex1] = outG1[kindex2] = 0;
outG2[kindex1] = outG2[kindex2] = 0;
outG3[kindex1] = outG3[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = kx * ny * nz + k * nz + kz;
const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
outG1[kindex1] = outG1[kindex2] = 0;
outG2[kindex1] = outG2[kindex2] = 0;
outG3[kindex1] = outG3[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = k * ny * nz + ky * nz + kz;
const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
outG1[kindex1] = outG1[kindex2] = 0;
outG2[kindex1] = outG2[kindex2] = 0;
outG3[kindex1] = outG3[kindex2] = 0;
}
}
}
// interior
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
const long kxnynz = kx * nynz;
for (long ky = by; ky < kymax; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long kynz_kz = kynz + kz;
const long kxm4 = (kx-4) * nynz + kynz_kz;
const long kxm3 = (kx-3) * nynz + kynz_kz;
const long kxm2 = (kx-2) * nynz + kynz_kz;
const long kxm1 = (kx-1) * nynz + kynz_kz;
const long kxp0 = (kx+0) * nynz + kynz_kz;
const long kxp1 = (kx+1) * nynz + kynz_kz;
const long kxp2 = (kx+2) * nynz + kynz_kz;
const long kxp3 = (kx+3) * nynz + kynz_kz;
const long kym4 = kxnynz + (ky-4) * nz + kz;
const long kym3 = kxnynz + (ky-3) * nz + kz;
const long kym2 = kxnynz + (ky-2) * nz + kz;
const long kym1 = kxnynz + (ky-1) * nz + kz;
const long kyp0 = kxnynz + (ky+0) * nz + kz;
const long kyp1 = kxnynz + (ky+1) * nz + kz;
const long kyp2 = kxnynz + (ky+2) * nz + kz;
const long kyp3 = kxnynz + (ky+3) * nz + kz;
const long kzm4 = kxnynz_kynz + (kz-4);
const long kzm3 = kxnynz_kynz + (kz-3);
const long kzm2 = kxnynz_kynz + (kz-2);
const long kzm1 = kxnynz_kynz + (kz-1);
const long kzp0 = kxnynz_kynz + (kz+0);
const long kzp1 = kxnynz_kynz + (kz+1);
const long kzp2 = kxnynz_kynz + (kz+2);
const long kzp3 = kxnynz_kynz + (kz+3);
// ........................ G1 ........................
const Type stencilG1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inG1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inG1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inG1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inG1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inG1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inG1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inG1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inG1[kxp3]);
const Type stencilG1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inG1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inG1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inG1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inG1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inG1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inG1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inG1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inG1[kyp3]);
const Type stencilG1C =
c8_1 * (- sinTheta[kzm1] * inG1[kzm1] + sinTheta[kzp0] * inG1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inG1[kzm2] + sinTheta[kzp1] * inG1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inG1[kzm3] + sinTheta[kzp2] * inG1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inG1[kzm4] + sinTheta[kzp3] * inG1[kzp3]);
// ........................ G2 ........................
const Type stencilG2A =
c8_1 * (- sinPhi[kxm1] * inG2[kxm1] + sinPhi[kxp0] * inG2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inG2[kxm2] + sinPhi[kxp1] * inG2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inG2[kxm3] + sinPhi[kxp2] * inG2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inG2[kxm4] + sinPhi[kxp3] * inG2[kxp3]);
const Type stencilG2B =
c8_1 * (- cosPhi[kym1] * inG2[kym1] + cosPhi[kyp0] * inG2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inG2[kym2] + cosPhi[kyp1] * inG2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inG2[kym3] + cosPhi[kyp2] * inG2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inG2[kym4] + cosPhi[kyp3] * inG2[kyp3]);
// ........................ G3 ........................
const Type stencilG3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inG3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inG3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inG3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inG3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inG3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inG3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inG3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inG3[kxp3]);
const Type stencilG3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inG3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inG3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inG3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inG3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inG3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inG3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inG3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inG3[kyp3]);
const Type stencilG3C =
c8_1 * (- cosTheta[kzm1] * inG3[kzm1] + cosTheta[kzp0] * inG3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inG3[kzm2] + cosTheta[kzp1] * inG3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inG3[kzm3] + cosTheta[kzp2] * inG3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inG3[kzm4] + cosTheta[kzp3] * inG3[kzp3]);
const long k = kxnynz_kynz + kz;
outG1[k] = invDx * stencilG1A + invDy * stencilG1B - invDz * stencilG1C;
outG2[k] = - invDx * stencilG2A + invDy * stencilG2B;
outG3[k] = invDx * stencilG3A + invDy * stencilG3B + invDz * stencilG3C;
}
}
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 4; ky < ny4; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
{
// kz = 0 -- at the free surface -- p = 0, dp = 0
const long k = kxnynz_kynz + 0;
outG1[k] = 0;
outG2[k] = 0;
outG3[k] = 0;
}
// kz = 1 -- one cell below the free surface
{
const long kz = 1;
const long kynz_kz = kynz + kz;
const long kxm4 = (kx-4) * nynz + kynz_kz;
const long kxm3 = (kx-3) * nynz + kynz_kz;
const long kxm2 = (kx-2) * nynz + kynz_kz;
const long kxm1 = (kx-1) * nynz + kynz_kz;
const long kxp0 = (kx+0) * nynz + kynz_kz;
const long kxp1 = (kx+1) * nynz + kynz_kz;
const long kxp2 = (kx+2) * nynz + kynz_kz;
const long kxp3 = (kx+3) * nynz + kynz_kz;
const long kym4 = kxnynz + (ky-4) * nz + kz;
const long kym3 = kxnynz + (ky-3) * nz + kz;
const long kym2 = kxnynz + (ky-2) * nz + kz;
const long kym1 = kxnynz + (ky-1) * nz + kz;
const long kyp0 = kxnynz + (ky+0) * nz + kz;
const long kyp1 = kxnynz + (ky+1) * nz + kz;
const long kyp2 = kxnynz + (ky+2) * nz + kz;
const long kyp3 = kxnynz + (ky+3) * nz + kz;
const long kzm4 = kxnynz_kynz + 2;
const long kzm3 = kxnynz_kynz + 1;
const long kzm2 = kxnynz_kynz + 0;
const long kzm1 = kxnynz_kynz + 0;
const long kzp0 = kxnynz_kynz + 1;
const long kzp1 = kxnynz_kynz + 2;
const long kzp2 = kxnynz_kynz + 3;
const long kzp3 = kxnynz_kynz + 4;
// ........................ G1 ........................
const Type stencilG1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inG1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inG1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inG1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inG1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inG1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inG1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inG1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inG1[kxp3]);
const Type stencilG1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inG1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inG1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inG1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inG1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inG1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inG1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inG1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inG1[kyp3]);
const Type stencilG1C =
c8_1 * (- sinTheta[kzm1] * inG1[kzm1] + sinTheta[kzp0] * inG1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inG1[kzm2] + sinTheta[kzp1] * inG1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inG1[kzm3] + sinTheta[kzp2] * inG1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inG1[kzm4] + sinTheta[kzp3] * inG1[kzp3]);
// ........................ G2 ........................
const Type stencilG2A =
c8_1 * (- sinPhi[kxm1] * inG2[kxm1] + sinPhi[kxp0] * inG2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inG2[kxm2] + sinPhi[kxp1] * inG2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inG2[kxm3] + sinPhi[kxp2] * inG2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inG2[kxm4] + sinPhi[kxp3] * inG2[kxp3]);
const Type stencilG2B =
c8_1 * (- cosPhi[kym1] * inG2[kym1] + cosPhi[kyp0] * inG2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inG2[kym2] + cosPhi[kyp1] * inG2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inG2[kym3] + cosPhi[kyp2] * inG2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inG2[kym4] + cosPhi[kyp3] * inG2[kyp3]);
// ........................ G3 ........................
const Type stencilG3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inG3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inG3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inG3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inG3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inG3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inG3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inG3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inG3[kxp3]);
const Type stencilG3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inG3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inG3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inG3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inG3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inG3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inG3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inG3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inG3[kyp3]);
const Type stencilG3C =
c8_1 * (- cosTheta[kzm1] * inG3[kzm1] + cosTheta[kzp0] * inG3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inG3[kzm2] + cosTheta[kzp1] * inG3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inG3[kzm3] + cosTheta[kzp2] * inG3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inG3[kzm4] + cosTheta[kzp3] * inG3[kzp3]);
const long k = kxnynz_kynz + kz;
outG1[k] = invDx * stencilG1A + invDy * stencilG1B - invDz * stencilG1C;
outG2[k] = - invDx * stencilG2A + invDy * stencilG2B;
outG3[k] = invDx * stencilG3A + invDy * stencilG3B + invDz * stencilG3C;
}
// kz = 2 -- two cells below the free surface
{
const long kz = 2;
const long kynz_kz = kynz + kz;
const long kxm4 = (kx-4) * nynz + kynz_kz;
const long kxm3 = (kx-3) * nynz + kynz_kz;
const long kxm2 = (kx-2) * nynz + kynz_kz;
const long kxm1 = (kx-1) * nynz + kynz_kz;
const long kxp0 = (kx+0) * nynz + kynz_kz;
const long kxp1 = (kx+1) * nynz + kynz_kz;
const long kxp2 = (kx+2) * nynz + kynz_kz;
const long kxp3 = (kx+3) * nynz + kynz_kz;
const long kym4 = kxnynz + (ky-4) * nz + kz;
const long kym3 = kxnynz + (ky-3) * nz + kz;
const long kym2 = kxnynz + (ky-2) * nz + kz;
const long kym1 = kxnynz + (ky-1) * nz + kz;
const long kyp0 = kxnynz + (ky+0) * nz + kz;
const long kyp1 = kxnynz + (ky+1) * nz + kz;
const long kyp2 = kxnynz + (ky+2) * nz + kz;
const long kyp3 = kxnynz + (ky+3) * nz + kz;
const long kzm4 = kxnynz_kynz + 1;
const long kzm3 = kxnynz_kynz + 0;
const long kzm2 = kxnynz_kynz + 0;
const long kzm1 = kxnynz_kynz + 1;
const long kzp0 = kxnynz_kynz + 2;
const long kzp1 = kxnynz_kynz + 3;
const long kzp2 = kxnynz_kynz + 4;
const long kzp3 = kxnynz_kynz + 5;
// ........................ G1 ........................
const Type stencilG1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inG1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inG1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inG1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inG1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inG1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inG1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inG1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inG1[kxp3]);
const Type stencilG1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inG1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inG1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inG1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inG1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inG1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inG1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inG1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inG1[kyp3]);
const Type stencilG1C =
c8_1 * (- sinTheta[kzm1] * inG1[kzm1] + sinTheta[kzp0] * inG1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inG1[kzm2] + sinTheta[kzp1] * inG1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inG1[kzm3] + sinTheta[kzp2] * inG1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inG1[kzm4] + sinTheta[kzp3] * inG1[kzp3]);
// ........................ G2 ........................
const Type stencilG2A =
c8_1 * (- sinPhi[kxm1] * inG2[kxm1] + sinPhi[kxp0] * inG2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inG2[kxm2] + sinPhi[kxp1] * inG2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inG2[kxm3] + sinPhi[kxp2] * inG2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inG2[kxm4] + sinPhi[kxp3] * inG2[kxp3]);
const Type stencilG2B =
c8_1 * (- cosPhi[kym1] * inG2[kym1] + cosPhi[kyp0] * inG2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inG2[kym2] + cosPhi[kyp1] * inG2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inG2[kym3] + cosPhi[kyp2] * inG2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inG2[kym4] + cosPhi[kyp3] * inG2[kyp3]);
// ........................ G3 ........................
const Type stencilG3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inG3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inG3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inG3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inG3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inG3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inG3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inG3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inG3[kxp3]);
const Type stencilG3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inG3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inG3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inG3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inG3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inG3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inG3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inG3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inG3[kyp3]);
const Type stencilG3C =
c8_1 * (- cosTheta[kzm1] * inG3[kzm1] + cosTheta[kzp0] * inG3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inG3[kzm2] + cosTheta[kzp1] * inG3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inG3[kzm3] + cosTheta[kzp2] * inG3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inG3[kzm4] + cosTheta[kzp3] * inG3[kzp3]);
const long k = kxnynz_kynz + kz;
outG1[k] = invDx * stencilG1A + invDy * stencilG1B - invDz * stencilG1C;
outG2[k] = - invDx * stencilG2A + invDy * stencilG2B;
outG3[k] = invDx * stencilG3A + invDy * stencilG3B + invDz * stencilG3C;
}
// kz = 3 -- three cells below the free surface
{
const long kz = 3;
const long kynz_kz = kynz + kz;
const long kxm4 = (kx-4) * nynz + kynz_kz;
const long kxm3 = (kx-3) * nynz + kynz_kz;
const long kxm2 = (kx-2) * nynz + kynz_kz;
const long kxm1 = (kx-1) * nynz + kynz_kz;
const long kxp0 = (kx+0) * nynz + kynz_kz;
const long kxp1 = (kx+1) * nynz + kynz_kz;
const long kxp2 = (kx+2) * nynz + kynz_kz;
const long kxp3 = (kx+3) * nynz + kynz_kz;
const long kym4 = kxnynz + (ky-4) * nz + kz;
const long kym3 = kxnynz + (ky-3) * nz + kz;
const long kym2 = kxnynz + (ky-2) * nz + kz;
const long kym1 = kxnynz + (ky-1) * nz + kz;
const long kyp0 = kxnynz + (ky+0) * nz + kz;
const long kyp1 = kxnynz + (ky+1) * nz + kz;
const long kyp2 = kxnynz + (ky+2) * nz + kz;
const long kyp3 = kxnynz + (ky+3) * nz + kz;
const long kzm4 = kxnynz_kynz + 0;
const long kzm3 = kxnynz_kynz + 0;
const long kzm2 = kxnynz_kynz + 1;
const long kzm1 = kxnynz_kynz + 2;
const long kzp0 = kxnynz_kynz + 3;
const long kzp1 = kxnynz_kynz + 4;
const long kzp2 = kxnynz_kynz + 5;
const long kzp3 = kxnynz_kynz + 6;
// ........................ G1 ........................
const Type stencilG1A =
c8_1 * (- cosTheta[kxm1] * cosPhi[kxm1] * inG1[kxm1] + cosTheta[kxp0] * cosPhi[kxp0] * inG1[kxp0]) +
c8_2 * (- cosTheta[kxm2] * cosPhi[kxm2] * inG1[kxm2] + cosTheta[kxp1] * cosPhi[kxp1] * inG1[kxp1]) +
c8_3 * (- cosTheta[kxm3] * cosPhi[kxm3] * inG1[kxm3] + cosTheta[kxp2] * cosPhi[kxp2] * inG1[kxp2]) +
c8_4 * (- cosTheta[kxm4] * cosPhi[kxm4] * inG1[kxm4] + cosTheta[kxp3] * cosPhi[kxp3] * inG1[kxp3]);
const Type stencilG1B =
c8_1 * (- cosTheta[kym1] * sinPhi[kym1] * inG1[kym1] + cosTheta[kyp0] * sinPhi[kyp0] * inG1[kyp0]) +
c8_2 * (- cosTheta[kym2] * sinPhi[kym2] * inG1[kym2] + cosTheta[kyp1] * sinPhi[kyp1] * inG1[kyp1]) +
c8_3 * (- cosTheta[kym3] * sinPhi[kym3] * inG1[kym3] + cosTheta[kyp2] * sinPhi[kyp2] * inG1[kyp2]) +
c8_4 * (- cosTheta[kym4] * sinPhi[kym4] * inG1[kym4] + cosTheta[kyp3] * sinPhi[kyp3] * inG1[kyp3]);
const Type stencilG1C =
c8_1 * (- sinTheta[kzm1] * inG1[kzm1] + sinTheta[kzp0] * inG1[kzp0]) +
c8_2 * (- sinTheta[kzm2] * inG1[kzm2] + sinTheta[kzp1] * inG1[kzp1]) +
c8_3 * (- sinTheta[kzm3] * inG1[kzm3] + sinTheta[kzp2] * inG1[kzp2]) +
c8_4 * (- sinTheta[kzm4] * inG1[kzm4] + sinTheta[kzp3] * inG1[kzp3]);
// ........................ G2 ........................
const Type stencilG2A =
c8_1 * (- sinPhi[kxm1] * inG2[kxm1] + sinPhi[kxp0] * inG2[kxp0]) +
c8_2 * (- sinPhi[kxm2] * inG2[kxm2] + sinPhi[kxp1] * inG2[kxp1]) +
c8_3 * (- sinPhi[kxm3] * inG2[kxm3] + sinPhi[kxp2] * inG2[kxp2]) +
c8_4 * (- sinPhi[kxm4] * inG2[kxm4] + sinPhi[kxp3] * inG2[kxp3]);
const Type stencilG2B =
c8_1 * (- cosPhi[kym1] * inG2[kym1] + cosPhi[kyp0] * inG2[kyp0]) +
c8_2 * (- cosPhi[kym2] * inG2[kym2] + cosPhi[kyp1] * inG2[kyp1]) +
c8_3 * (- cosPhi[kym3] * inG2[kym3] + cosPhi[kyp2] * inG2[kyp2]) +
c8_4 * (- cosPhi[kym4] * inG2[kym4] + cosPhi[kyp3] * inG2[kyp3]);
// ........................ G3 ........................
const Type stencilG3A =
c8_1 * (- sinTheta[kxm1] * cosPhi[kxm1] * inG3[kxm1] + sinTheta[kxp0] * cosPhi[kxp0] * inG3[kxp0]) +
c8_2 * (- sinTheta[kxm2] * cosPhi[kxm2] * inG3[kxm2] + sinTheta[kxp1] * cosPhi[kxp1] * inG3[kxp1]) +
c8_3 * (- sinTheta[kxm3] * cosPhi[kxm3] * inG3[kxm3] + sinTheta[kxp2] * cosPhi[kxp2] * inG3[kxp2]) +
c8_4 * (- sinTheta[kxm4] * cosPhi[kxm4] * inG3[kxm4] + sinTheta[kxp3] * cosPhi[kxp3] * inG3[kxp3]);
const Type stencilG3B =
c8_1 * (- sinTheta[kym1] * sinPhi[kym1] * inG3[kym1] + sinTheta[kyp0] * sinPhi[kyp0] * inG3[kyp0]) +
c8_2 * (- sinTheta[kym2] * sinPhi[kym2] * inG3[kym2] + sinTheta[kyp1] * sinPhi[kyp1] * inG3[kyp1]) +
c8_3 * (- sinTheta[kym3] * sinPhi[kym3] * inG3[kym3] + sinTheta[kyp2] * sinPhi[kyp2] * inG3[kyp2]) +
c8_4 * (- sinTheta[kym4] * sinPhi[kym4] * inG3[kym4] + sinTheta[kyp3] * sinPhi[kyp3] * inG3[kyp3]);
const Type stencilG3C =
c8_1 * (- cosTheta[kzm1] * inG3[kzm1] + cosTheta[kzp0] * inG3[kzp0]) +
c8_2 * (- cosTheta[kzm2] * inG3[kzm2] + cosTheta[kzp1] * inG3[kzp1]) +
c8_3 * (- cosTheta[kzm3] * inG3[kzm3] + cosTheta[kzp2] * inG3[kzp2]) +
c8_4 * (- cosTheta[kzm4] * inG3[kzm4] + cosTheta[kzp3] * inG3[kzp3]);
const long k = kxnynz_kynz + kz;
outG1[k] = invDx * stencilG1A + invDy * stencilG1B - invDz * stencilG1C;
outG2[k] = - invDx * stencilG2A + invDy * stencilG2B;
outG3[k] = invDx * stencilG3A + invDy * stencilG3B + invDz * stencilG3C;
}
}
}
}
}
};
#endif
|
omptriangle.c | #include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[]) {
const int n = 20;
int** the_array = (int**) malloc(sizeof(int*) * n);
int i = 0;
for(i = 0; i < n; ++i) {
the_array[i] = (int*) malloc(sizeof(int) * n);
}
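// Note: the index variable of the loop associated with an OpenMP
// "parallel for" construct is implicitly made private to each thread,
// so reusing the i declared above is safe here.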
#pragma omp parallel for
for(i = 0; i < n; ++i) {
int j = 0;
for(j = 0; j < n; ++j) {
if(j < i) {
the_array[i][j] = 1;
} else {
the_array[i][j] = 0;
}
}
}
printf("The matrix is:\n\n");
for(i = 0; i < n; ++i) {
int j = 0;
for(j = 0; j < n; ++j) {
printf("%d ", the_array[i][j]);
}
printf("\n");
}
// free the matrix before exiting
for(i = 0; i < n; ++i) {
free(the_array[i]);
}
free(the_array);
return 0;
}
|
GB_unaryop__identity_int32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int32_int32
// op(A') function: GB_tran__identity_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
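// For illustration, GB_CAST_OP (pC,pA) therefore expands to:
//      int32_t aij = Ax [pA] ;
//      int32_t z = (int32_t) aij ;
//      Cx [pC] = z ;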
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int32_int32
(
int32_t *Cx, // Cx and Ax may be aliased
int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
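// A minimal usage sketch (hypothetical arrays; the caller owns them):
//      int32_t Ax [4] = {1, 2, 3, 4} ;
//      int32_t Cx [4] ;
//      GB_unop__identity_int32_int32 (Cx, Ax, 4, 1) ;  // Cx now equals Ax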
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_subassign_09.c | //------------------------------------------------------------------------------
// GB_subassign_09: C(I,J)<M,repl> = scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 09: C(I,J)<M,repl> = scalar ; using S
// M: present
// Mask_comp: false
// C_replace: true
// accum: NULL
// A: scalar
// S: constructed
// C: not bitmap or full
#include "GB_unused.h"
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_09
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_GET_C ; // C must not be bitmap
GB_GET_MASK ;
GB_GET_SCALAR ;
GB_GET_S ;
GrB_BinaryOp accum = NULL ;
//--------------------------------------------------------------------------
// Method 09: C(I,J)<M,repl> = scalar ; using S
//--------------------------------------------------------------------------
// Time: Optimal. All entries in M+S must be examined. All entries in S
// are modified: if M(i,j)=1 then S(i,j) is used to write to the
// corresponding entry in C. If M(i,j) is not present, or zero, then the
// entry in C is cleared (because of C_replace). If S(i,j) is not present,
// and M(i,j)=1, then the scalar is inserted into C. The only case that
// can be skipped is if neither S nor M is present. As a result, this
// method need not traverse all of IxJ. It can limit its traversal to the
// pattern of M+S.
// Method 09 and Method 11 are very similar.
//--------------------------------------------------------------------------
// Parallel: M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
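// Notation in the comments below: each case is written [c A m], where
// c is the state of the C(i,j) entry ('C' = present, 'X' = zombie,
// '.' = not present), A is the scalar being assigned, and m is the
// effective mask value M(i,j) (0 or 1).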
if (M_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all M+S
GB_SUBASSIGN_TWO_SLICE (M, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (M_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: M is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iM_start, iM_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iM_start:iM_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
int64_t pM_start = j * Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
//--------------------------------------------------------------
for (int64_t iM = iM_start ; iM < iM_end ; iM++)
{
int64_t pM = pM_start + iM ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
if (Sfound && !mij)
{
// S (i,j) is present but M (i,j) is false
// ----[C A 0] or [X A 0]-------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (!Sfound && mij)
{
// S (i,j) is not present, M (i,j) is true
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
else if (Sfound && mij)
{
// S (i,j) present and M (i,j) is true
GB_C_S_LOOKUP ;
// ----[C A 1] or [X A 1]-------------------------------
// [C A 1]: action: ( =A ): copy A, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_noaccum_C_A_1_scalar ;
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: M is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get S(:,j) and M(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and M(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and M (:,j) have entries
while (pS < pS_end && pM < pM_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iM = GBI (Mi, pM, Mvlen) ;
if (iS < iM)
{
// S (i,j) is present but M (i,j) is not
// ----[C A 0] or [X A 0]-------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (iM < iS)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (M) ;
}
else
{
// both S (i,j) and M (i,j) present
GB_C_S_LOOKUP ;
if (GB_mcast (Mx, pM, msize))
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): copy A, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_noaccum_C_A_1_scalar ;
}
else
{
// ----[C A 0] or [X A 0]---------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): now zombie
GB_DELETE_ENTRY ;
}
GB_NEXT (S) ;
GB_NEXT (M) ;
}
}
// while list S (:,j) has entries. List M (:,j) exhausted.
while (pS < pS_end)
{
// S (i,j) is present but M (i,j) is not
// ----[C A 0] or [X A 0]-----------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
// while list M (:,j) has entries. List S (:,j) exhausted.
while (pM < pM_end)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (M) ;
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (M_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: M is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iM_start, iM_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iM_start:iM_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
int64_t pM_start = j * Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iM = iM_start ; iM < iM_end ; iM++)
{
int64_t pM = pM_start + iM ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
if (!Sfound && mij)
{
// S (i,j) is not present, M (i,j) is true
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: M is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get S(:,j) and M(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and M(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and M (:,j) have entries
while (pS < pS_end && pM < pM_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iM = GBI (Mi, pM, Mvlen) ;
if (iS < iM)
{
// S (i,j) is present but M (i,j) is not
GB_NEXT (S) ;
}
else if (iM < iS)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
GB_NEXT (M) ;
}
else
{
// both S (i,j) and M (i,j) present
GB_NEXT (S) ;
GB_NEXT (M) ;
}
}
// while list M (:,j) has entries. List S (:,j) exhausted.
while (pM < pM_end)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iM = GBI (Mi, pM, Mvlen) ;
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
GB_NEXT (M) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
vssched.h | #pragma omp parallel for collapse(2)
for (long k = GZ; k < N + GZ; k += TILEK)
for (long j = GZ; j < N + GZ; j += TILEJ)
for (long i = GZ; i < N + GZ; i += TILEI)
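// A hypothetical intra-tile body for this blocked loop nest (a sketch;
// N, GZ, and TILEI/TILEJ/TILEK are assumed to be defined elsewhere):
//     for (long kk = k; kk < k + TILEK && kk < N + GZ; kk++)
//         for (long jj = j; jj < j + TILEJ && jj < N + GZ; jj++)
//             for (long ii = i; ii < i + TILEI && ii < N + GZ; ii++)
//                 update(ii, jj, kk); // assumed per-point stencil update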
|
hill_paralelo.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
int** reserveMemoryMatrix(int rows, int columns);
void fillMatrix(int** matrix, int dimension);
void printMatrix(int** matrix, int rows, int columns);
char* getAlphabet();
// Functions to read the file and get a clean string to encrypt
char* readFile(char *filename);
void replaceAndremoveSpaces(char* string,char* alphabet);
int checkIfExist(char character, char* alphabet);
// Function to pad the string only when necessary
char* completeText(char* string, int numberMissingCharacters);
// Functions to convert the string into vectors of numbers
int** separateStringToVectors(char* stringToSeparate, int numberOfVectors, int dimension, char* alphabet);
int* convertVectorToNumbers(char* vector, char* alphabet);
int* multiplyVector(int** matrix, int *vector, int dimension, int module);
char* convertNumbersToStrign(int *vector, char* alphabet, int dimension);
char* encryptVector(int** matrix, char * alphabet, int dimension, int numberOfVectors);
void printFile(char* fileName, char* string);
int main(int argc, char *argv[]){
int dimension, **nonSingularMatrix, *vector, lenghtAlphabet, **matrixOfVectors, module;
char *stringToEncrypt, *alphabet, *textEncrypt;
int i;
if(argc != 2){
printf("numero de parametros incorrecto: program <file>\n");
return 0;
}
#pragma omp parallel
{
#pragma omp single
{
alphabet = getAlphabet();
lenghtAlphabet = strlen(alphabet);
}
#pragma omp single
{
printf("Ingresa la dimension de la matriz: ");
scanf("%d", &dimension);
}
#pragma omp single nowait
{
stringToEncrypt = readFile(argv[1]);
}
}
#pragma omp parallel sections
{
#pragma omp section
{
nonSingularMatrix = reserveMemoryMatrix(dimension, dimension);
}
#pragma omp section
{
// Remove blank spaces from the string to encrypt; the buffer is modified in place
replaceAndremoveSpaces(stringToEncrypt,alphabet);
}
}
#pragma omp parallel sections
{
#pragma omp section
fillMatrix(nonSingularMatrix, dimension);
#pragma omp section
{
module = strlen(stringToEncrypt) % dimension;
if(module != 0){
// printf("Necesita completarse\n");
int numberMissingCharacters = dimension - module;
stringToEncrypt = completeText(stringToEncrypt, numberMissingCharacters);
}
}
}
int numberOfVectors = strlen(stringToEncrypt)/dimension;
matrixOfVectors = separateStringToVectors(stringToEncrypt, numberOfVectors, dimension, alphabet);
#pragma omp parallel for private(i) shared(nonSingularMatrix, matrixOfVectors) firstprivate(dimension, alphabet)
for (i = 0; i < numberOfVectors; i++)
matrixOfVectors[i] = multiplyVector(nonSingularMatrix, matrixOfVectors[i], dimension, strlen(alphabet));
textEncrypt = encryptVector(matrixOfVectors, alphabet, dimension, numberOfVectors);
printFile("salida.txt",textEncrypt);
printf("Texto cifrado en archivo salida.txt\n");
return 0;
}
char* getAlphabet(){
// return "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz .,;-!?_";
// return "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
return "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
}
char* readFile(char *fileName){
char *buffer = NULL;
int string_size, read_size;
FILE *file = fopen(fileName, "r");
if (file){
// Seek the last byte of the file
fseek(file, 0, SEEK_END);
// Offset from the first to the last byte, or in other words, filesize
string_size = ftell(file);
// go back to the start of the file
rewind(file);
// Allocate a string that can hold it all
buffer = (char*) malloc(sizeof(char) * (string_size + 1) );
// Read it all in one operation
read_size = fread(buffer, sizeof(char), string_size, file);
// fread doesn't set it so put a \0 in the last position
// and buffer is now officially a string
buffer[string_size] = '\0';
if(string_size != read_size){
// Something went wrong, throw away the memory and set
// the buffer to NULL
free(buffer);
buffer = NULL;
}
// Always remember to close the file.
fclose(file);
}
return buffer;
}
int** reserveMemoryMatrix(int rows, int columns){
int **matrix = (int **) calloc(rows, sizeof(int*));
int i;
for (i = 0; i < rows; i++)
matrix[i] = (int*) calloc(columns, sizeof(int));
return matrix;
}
void replaceAndremoveSpaces(char* str, char* alphabet) {
size_t str_len = strlen(str);
char result[str_len];
size_t p = 0;
size_t i = 0;
for (i = 0; i < str_len; ++i) {
if (str[i] != ' ') {
if(str[i] == 10 || str[i] == 11 || str[i] == 13 || checkIfExist(str[i], alphabet)){
result[p] = str[i];
}else{
switch((unsigned int)str[i]){
case 165: //Ñ
result[p] = 'N';
break;
case 164: //ñ
result[p] = 'n';
break;
default:
result[p] = 'X';
}
}
p++;
}
}
if (p < str_len){
str[p] = '\0';
}
for (i = 0; i < p; ++i) {
str[i] = result[i];
}
}
int checkIfExist(char character, char* alphabet){
for (int j = 0; j < strlen(alphabet); j++){
if (character == alphabet[j]){
return 1;
}
}
return 0;
}
void fillMatrix(int** matrix, int dimension){
int i,j;
for (i = 0; i < dimension; i++){
for (j = 0; j < dimension; j++){
printf("Ingresa el elemento [%d][%d] de la matriz: ",i,j);
scanf("%d",&matrix[i][j]);
}
}
}
char* completeText(char* string, int numberMissingCharacters){
char *new_string = (char*) calloc (strlen(string)+numberMissingCharacters+1, sizeof(char)); // +1 for the terminating NUL
char *complements = (char*) calloc (numberMissingCharacters+1, sizeof(char));
for (int i = 0; i < numberMissingCharacters; i++)
complements[i] = 'X';
strcpy(new_string, string);
strcat(new_string, complements);
return new_string;
}
int** separateStringToVectors(char* stringToSeparate, int numberOfVectors, int dimension, char* alphabet){
char **arrayOfStrings;
int **matrixOfNumbers;
int i,j;
arrayOfStrings = (char**) calloc(numberOfVectors, sizeof(char*));
#pragma omp parallel for shared(arrayOfStrings, dimension) private(i)
for (i = 0; i < numberOfVectors; i++)
arrayOfStrings[i] = (char*) calloc(dimension+1, sizeof(char)); // +1 so strlen() in convertVectorToNumbers sees a terminator
// Reserve memory for the matrix holding the number vectors
matrixOfNumbers = reserveMemoryMatrix(numberOfVectors, dimension);
// Split the string into chunks of 'dimension' characters
#pragma omp parallel for private(i,j) shared(arrayOfStrings, stringToSeparate, dimension)
for (i = 0; i < numberOfVectors; i++){
for (j = 0; j < dimension; j++){
arrayOfStrings[i][j] = stringToSeparate[(i*dimension)+j];
}
}
// Convert each string to numbers according to the alphabet
#pragma omp parallel for private(i) shared(matrixOfNumbers,arrayOfStrings, alphabet)
for (i = 0; i < numberOfVectors; i++)
matrixOfNumbers[i] = convertVectorToNumbers(arrayOfStrings[i], alphabet);
return matrixOfNumbers;
}
void printFile(char* fileName, char* string){
FILE * file = fopen(fileName, "w");
if(file){
fputs(string,file);
fclose(file);
}
}
int* convertVectorToNumbers(char* vector, char* alphabet){
int* vectorOFNumbers = (int*) calloc(strlen(vector), sizeof(int));
for (int i = 0; i < strlen(vector); i++){
for (int j = 0; j < strlen(alphabet); j++){
if (vector[i] == alphabet[j]){
vectorOFNumbers[i] = j;
}
}
}
return vectorOFNumbers;
}
char* encryptVector(int** matrix, char * alphabet, int dimension, int numberOfVectors){
char* ev = (char*) calloc(dimension*numberOfVectors+1,sizeof(char)); // +1 for the terminating NUL used by strcat
for(int i=0; i<numberOfVectors; i++){
ev = strcat(ev, convertNumbersToStrign(matrix[i],alphabet,dimension));
}
return ev;
}
char* convertNumbersToStrign(int *vector, char* alphabet, int dimension){
char* str = (char*) calloc(dimension+1,sizeof(char)); // +1 so the result is NUL-terminated for strcat
for(int i = 0 ; i<dimension ; i++){
str[i] = alphabet[vector[i]];
}
return str;
}
int* multiplyVector(int** matrix, int *vector, int dimension, int module){
//Reserving memory for new vector
int *resultVector = (int*) calloc(dimension, sizeof(int));
int i, j, tmp;
//Iterating the matrix and multiplying by the vector pointer
for(i=0; i < dimension; i++) {
for(j=0; j < dimension; j++) {
*(resultVector + i) += (matrix[i][j] * (*(vector + j)));
}
// Reduce modulo the alphabet length
*(resultVector+i) = *(resultVector+i) % module;
}
return resultVector;
}
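// Worked example (hypothetical values): with the A-Z alphabet (module 26),
// dimension 2, key matrix {{3,3},{2,5}} and plaintext block "HI" -> [7,8],
// the product is [3*7+3*8, 2*7+5*8] = [45,54]; reduced mod 26 this is
// [19,2], which maps back to "TC".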
void printMatrix(int** matrix, int rows, int columns){
for (int i = 0; i < rows; i++){
for (int j = 0; j < columns; j++){
printf("%d ",matrix[i][j]);
}
printf("\n");
}
}
|
convolution_1x1_pack8_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack8_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
static void conv1x1s2_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 8;
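// tailstep: after outw strided reads a row still has w - 2 * outw input
// pixels left, and the y stride of 2 skips one more full row of w pixels;
// each pack8 int8 pixel occupies 8 bytes.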
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const signed char* r0 = bottom_blob.channel(p);
signed char* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
int8x8_t _v0 = vld1_s8(r0);
int8x8_t _v1 = vld1_s8(r0 + 16);
int8x8_t _v2 = vld1_s8(r0 + 32);
int8x8_t _v3 = vld1_s8(r0 + 48);
vst1_s8(outptr, _v0);
vst1_s8(outptr + 8, _v1);
vst1_s8(outptr + 16, _v2);
vst1_s8(outptr + 24, _v3);
r0 += 64;
outptr += 32;
}
for (; j + 1 < outw; j += 2)
{
int8x8_t _v0 = vld1_s8(r0);
int8x8_t _v1 = vld1_s8(r0 + 16);
vst1_s8(outptr, _v0);
vst1_s8(outptr + 8, _v1);
r0 += 32;
outptr += 16;
}
for (; j < outw; j++)
{
int8x8_t _v = vld1_s8(r0);
vst1_s8(outptr, _v);
r0 += 16;
outptr += 8;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8_int8_neon(bottom_blob_shrinked, top_blob, kernel, opt);
}
|
pmtv-OpenMP.c | /*
* pmtv-OpenMP.c
*
* Created on: 04/05/2014
* Author: Carlos de la Torre
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h> // biblioteca para programas paralelos
#else
#define omp_get_thread_num() 0
#endif
#define PRINT_ALL_MIN 15
// We set 15 as the minimum number of elements below
// which all the values of the matrix are printed
#define NELEMENTOS(x) (sizeof(x) / sizeof(x[0]))
// This gives us the number of elements of any C
// array: just put the array name where x is
#define DEBUGMODE 1
// This definition makes sure that only the timing
// figures are printed on each run, which makes the
// empirical study of the program easier
void error(char* param[]){
printf("\n [USAGE]-%s [num iteraciones] [planificación] [num chunk] [num hebras]\n"
" para mas información %s --help\n\n", param[0],param[0]);
if (param[1]==NULL){
fprintf(stderr, " [ERROR]-Falta iteraciones\n");
exit(-1);
}else if (param[2]==NULL){
fprintf(stderr, " [ERROR]-Falta modo de planificación: static, dynamic, guided\n");
exit(-1);
}else if (param[3]==NULL){
fprintf(stderr, " [ERROR]-Falta chunk\n");
exit(-1);
}
// }else if (argv[4]==NULL){
// fprintf(stderr, " [ERROR]-Falta numero de hebras\n");
// exit(-1);
// }
exit(-1);
}
int main(int argc, char* argv[]) {
int f,c,N;
char planificacion[16], chunk[8]="", hebras[8], tmp[8], help[7]="--help"; // buffers enlarged so the copies and strcat calls below cannot overflow
char statics[16]="static", dynamic[16]="dynamic", guided[16]="guided";
double tr, t1, t2;
if (argc!=2 && argc!=3 && argc!=4 && argc!=5){
error(argv);
}else if (argc==2){
if (getenv("OMP_SCHEDULE")!=NULL){
N = atoi(argv[1]); // Size of the vector and of the matrix rows/columns
snprintf(hebras, sizeof(hebras), "%d", omp_get_num_procs());
if (!strncmp(dynamic,getenv("OMP_SCHEDULE"),7))
setenv("OMP_DYNAMIC","TRUE",1);
strcpy(planificacion,getenv("OMP_SCHEDULE"));
}else{
strncpy(tmp,argv[1],sizeof(tmp)-1); // bounded copy: argv[1] may be longer than tmp
tmp[sizeof(tmp)-1] = '\0';
if (!strncmp(tmp,help,6)){
printf("\n [USAGE]-%s [num iterations] [schedule] [chunk size] [num threads]\n\n"
" The OMP_SCHEDULE environment variable can also\n"
" be used to change the scheduling\n\n"
" Examples:\n"
" export OMP_SCHEDULE=\"static,4\"\n"
" %s 10 the number of threads is chosen from the number of cores\n\n Or\n"
" %s 10 dynamic 4 4\n\n",argv[0],argv[0],argv[0]);
exit(-1);
}else
error(argv);
}
}else if (argc==3){
N = atoi(argv[1]); // This will be the size of the vector and of the matrix rows/columns
strcpy(planificacion,argv[2]); // This captures the schedule text from the console
sprintf(hebras,"%d",12);
}else if (argc==4){
N = atoi(argv[1]); // This will be the size of the vector and of the matrix rows/columns
strcpy(planificacion,argv[2]); // This captures the schedule text from the console
strcpy(chunk,argv[3]); // The chunk size for the program
sprintf(hebras,"%d",omp_get_num_procs());
}else if (argc==5){
N = atoi(argv[1]); // This will be the size of the vector and of the matrix rows/columns
strcpy(planificacion,argv[2]); // This captures the schedule text from the console
strcpy(chunk,argv[3]); // The chunk size for the program
strcpy(hebras,argv[4]);
}
/* I chose this way of assigning values to the program because, on OpenMP's
 * priority scale, this is the second option; in other words, the only other
 * way to change the program's scheduling would be to edit the code and
 * use an if.
 */
if (!strcmp(statics,planificacion)){ // we use ! because strcmp returns 0 when the strings are equal
if (strcmp(chunk,"")){
strcat(statics,",");
strcat(statics,chunk);
}
setenv("OMP_SCHEDULE",statics,1); // Choose how we want the loop scheduling
}else if (!strcmp(dynamic,planificacion)){
if (strcmp(chunk,"")){
strcat(dynamic,",");
strcat(dynamic,chunk);
}
setenv("OMP_SCHEDULE",dynamic,1); // Choose how we want the loop scheduling
setenv("OMP_DYNAMIC","TRUE",1); // Set dynamic adjustment of the number of threads to true
}else if (!strcmp(guided,planificacion)){
if (strcmp(chunk,"")){
strcat(guided,",");
strcat(guided,chunk);
}
setenv("OMP_SCHEDULE",guided,1); // Choose how we want the loop scheduling
}
setenv("OMP_NUM_THREADS",hebras,1); // Set the number of threads for the next parallel execution
int *vector, *Vresultado;
int **MatrizTri;
MatrizTri = (int**) malloc(N * sizeof(int*));
if (MatrizTri == NULL) { // check before the row pointers are written through it
printf("Error allocating space for MatrizTri\n");
exit(-2);
}
for (f = 0; f < N; f++)
MatrizTri[f] = (int*) malloc(N * sizeof(int));
vector = (int*) malloc(N * sizeof(int)); // malloc returns NULL when there is not enough space
Vresultado = (int*) calloc(N, sizeof(int)); // zeroed: the result is accumulated with += below
if ((vector == NULL) || (Vresultado == NULL)) {
printf("Error allocating space for the vectors\n");
exit(-2);
}
srand(time(NULL)); // this is the seed used for the random numbers
#pragma omp parallel for schedule(runtime) private(f,c) shared(MatrizTri,vector)// Initialize the matrix and the vector
for(f = 0; f < N; f++){
for(c = 0; c < N; c++){
if(f > c) // <---- Flipping this comparison makes the matrix upper or lower triangular
MatrizTri[f][c]=rand()%10;
else
MatrizTri[f][c]=0;
}
vector[f] = rand()%10;
}
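// Added note: rand() is not required to be thread-safe, so the parallel
// initialization above may race on its hidden state. A sketch of a safer
// per-thread variant (POSIX rand_r; the seed variable is illustrative):
//   unsigned seed = (unsigned)time(NULL) ^ (unsigned)omp_get_thread_num();
//   MatrizTri[f][c] = rand_r(&seed) % 10;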
// print the matrix and the vector if N <= PRINT_ALL_MIN
if (N <= PRINT_ALL_MIN && DEBUGMODE!=1){
printf ("\nThis is the matrix: \n");
for (f = 0; f < N; f++){
for (c = 0; c < N; c++){
printf ("%d ",MatrizTri[f][c]);
}
printf ("\n");
}
printf ("\nEste es el vector: \n");
for (f = 0; f < N; f++)
printf ("%d ",vector[f]);
printf("\n\n");
}
t1 = omp_get_wtime(); // Compute the multiplication of a matrix by a vector
#pragma omp parallel shared(MatrizTri,vector,Vresultado)
{
#pragma omp for schedule(runtime) private(f,c)
for (f = 0; f < N; f++){
for (c = 0; c < N; c++)
Vresultado[f] += MatrizTri[f][c]*vector[c];
}
#pragma omp master
if (omp_in_parallel())
printf("Valores de las variables de control dentro del parallel:\n"
" dyn-var: %s\n"
" nthreads-var: %s\n"
" thread-limit-var: %s\n"
" nest-var: %s\n"
" run-sched-var: %s\n",getenv("OMP_DYNAMIC"),getenv("OMP_NUM_THREADS"), getenv("OMP_THREAD_LIMIT"),getenv("OMP_NESTED"),getenv("OMP_SCHEDULE"));
}
t2 = omp_get_wtime();
tr = t2 - t1; // Compute how long the multiplication took
// Now print the obtained results on screen according to the problem's constraints
if (N <= PRINT_ALL_MIN){
// if we want to print the full data and N <= PRINT_ALL_MIN
printf("Time(sec.):%11.9f\nMatrix and vector size:%d\n",tr,N);
printf ("This is the resulting vector: \n");
printf("{");
for (f = 0; f < N; f++){
if (f==N-1)
printf ("VR[%d]=%d",f,Vresultado[f]);
else
printf ("VR[%d]=%d, ",f,Vresultado[f]);
}
printf("}\n");
}else if (DEBUGMODE==1) // if we only want to print the computation time
printf("%11.9f\n",tr);//
else{ // and if we want to print the time plus the first and the last multiplication
printf("Time(sec.):%11.9f\n",tr);
printf("Matrix and vector size:%d\n",N);
printf("(Matriz[0][0]=%d)*(Vector[0]=%d)=%d\n",MatrizTri[0][0],vector[0],MatrizTri[0][0]*vector[0]);
printf("(Matriz[%d][%d]=%d)*(Vector[%d]=%d)=%d\n",N-1,N-1,MatrizTri[N-1][N-1],N-1,vector[N-1],MatrizTri[N-1][N-1]*vector[N-1]);
}
free(vector);
free(Vresultado);
for(f=0; f<N; f++)
free(MatrizTri[f]);
free(MatrizTri);
return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
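// Illustrative usage sketch (added; FID and Loc are hypothetical values):
// the single-element cache lets repeated lookups for the same FileID avoid
// a DenseMap probe.
//   FileNullabilityMap NullMap;
//   FileNullability &FN = NullMap[FID];  // first access: map lookup
//   if (FN.PointerLoc.isInvalid())
//     FN.PointerLoc = Loc;               // later accesses hit the cache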
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical wrt to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_specialization_constants_buffer,
kind_stream,
kind_last = kind_stream
};
public:
SYCLIntegrationHeader(Sema &S);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(StringRef MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(const FunctionDecl *SyclKernel, QualType KernelNameType,
SourceLocation Loc, bool IsESIMD, bool IsUnnamedKernel);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
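// Illustrative call sequence (inferred from the comments above; the
// arguments are made-up examples, not verbatim compiler usage):
//   H.startKernel(FD, NameTy, Loc, /*IsESIMD=*/false, /*IsUnnamedKernel=*/false);
//   H.addParamDesc(kind_std_layout, /*Info=*/4, /*Offset=*/0);
//   H.endKernel();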
/// Registers a specialization constant to emit info for it into the header.
void addSpecConstant(StringRef IDName, QualType IDType);
/// Update the names of a kernel description based on its SyclKernel.
void updateKernelNames(const FunctionDecl *SyclKernel, StringRef Name,
StringRef StableName) {
auto Itr = llvm::find_if(KernelDescs, [SyclKernel](const KernelDesc &KD) {
return KD.SyclKernel == SyclKernel;
});
assert(Itr != KernelDescs.end() && "Unknown kernel description");
Itr->updateKernelNames(Name, StableName);
}
/// Note which free functions (this_id, this_item, etc) are called within the
/// kernel
void setCallsThisId(bool B);
void setCallsThisItem(bool B);
void setCallsThisNDItem(bool B);
void setCallsThisGroup(bool B);
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_scalar or kind_struct, then
// denotes parameter size in bytes (includes padding for structs)
// If Kind is kind_accessor
// denotes access target; possible access targets are defined in
// access/access.hpp
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// there are four free functions the kernel may call (this_id, this_item,
// this_nd_item, this_group)
struct KernelCallsSYCLFreeFunction {
bool CallsThisId = false;
bool CallsThisItem = false;
bool CallsThisNDItem = false;
bool CallsThisGroup = false;
};
// Kernel invocation descriptor
struct KernelDesc {
/// sycl_kernel function associated with this kernel.
const FunctionDecl *SyclKernel;
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
SourceLocation KernelLocation;
/// Whether this kernel is an ESIMD one.
bool IsESIMDKernel;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
// Whether kernel calls any of the SYCL free functions (this_item(),
// this_id(), etc)
KernelCallsSYCLFreeFunction FreeFunctionCalls;
// If we are in unnamed kernel/lambda mode AND this is one that the user
// hasn't provided an explicit name for.
bool IsUnnamedKernel;
KernelDesc(const FunctionDecl *SyclKernel, QualType NameType,
SourceLocation KernelLoc, bool IsESIMD, bool IsUnnamedKernel)
: SyclKernel(SyclKernel), NameType(NameType), KernelLocation(KernelLoc),
IsESIMDKernel(IsESIMD), IsUnnamedKernel(IsUnnamedKernel) {}
void updateKernelNames(StringRef Name, StringRef StableName) {
this->Name = Name.str();
this->StableName = StableName.str();
}
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
using SpecConstID = std::pair<QualType, std::string>;
/// Keeps specialization constants met in the translation unit. Maps spec
/// constant's ID type to generated unique name. Duplicates are removed at
/// integration header emission time.
llvm::SmallVector<SpecConstID, 4> SpecConsts;
Sema &S;
};
class SYCLIntegrationFooter {
public:
SYCLIntegrationFooter(Sema &S) : S(S) {}
bool emit(StringRef MainSrc);
void addVarDecl(const VarDecl *VD);
private:
bool emit(raw_ostream &O);
Sema &S;
llvm::SmallVector<const VarDecl *> SpecConstants;
void emitSpecIDName(raw_ostream &O, const VarDecl *VD);
};
/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token, all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Handles e.g. BaseType{ .D = Tok...
void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
const Designation &D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
///
/// The callback should also emit signature help as a side-effect, but only
/// if the completion point has been reached.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Get the expected type associated with this location, if any.
///
/// If the location is a function argument, determining the expected type
/// involves considering all function overloads and the arguments so far.
/// In this case, signature help for these function overloads will be reported
/// as a side-effect (only if the completion point has been reached).
QualType get(SourceLocation Tok) const {
if (!Enabled || Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
bool Enabled;
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
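// Illustrative flow (added, not part of the original header): the parser
// calls one of the enter* methods when it recognizes a context, e.g.
// enterReturn(S, Tok) before parsing a return operand; code completion
// later calls get(Tok), which yields the recorded type only when Tok
// matches ExpectedLoc.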
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate it here
/// because that allows us not to duplicate the constants in clang code,
/// which we would otherwise have to do since we can't directly use the
/// llvm constants. The value is verified against llvm here:
/// lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 32;
static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
// `Native` represents default align mode, which may vary based on the
// platform.
enum Mode : unsigned char { Native, Natural, Packed, Mac68k };
// #pragma pack info constructor
AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
: PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
assert(Num == PackNumber && "The pack number has been truncated.");
}
// #pragma align info constructor
AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
: PackAttr(false), AlignMode(M),
PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}
explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}
AlignPackInfo() : AlignPackInfo(Native, false) {}
// When an AlignPackInfo itself cannot be used, this returns a 32-bit
// integer encoding for it. This should only be passed to
// AlignPackInfo::getFromRawEncoding, it should not be inspected directly.
static uint32_t getRawEncoding(const AlignPackInfo &Info) {
std::uint32_t Encoding{};
if (Info.IsXLStack())
Encoding |= IsXLMask;
Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
if (Info.IsPackAttr())
Encoding |= PackAttrMask;
Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
return Encoding;
}
static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
bool IsXL = static_cast<bool>(Encoding & IsXLMask);
AlignPackInfo::Mode M =
static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
int PackNumber = (Encoding & PackNumMask) >> 4;
if (Encoding & PackAttrMask)
return AlignPackInfo(M, PackNumber, IsXL);
return AlignPackInfo(M, IsXL);
}
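// Worked round-trip example (added for illustration): #pragma pack(8) on a
// non-XL target is AlignPackInfo(Native, /*Num=*/8, /*IsXL=*/false), so
// getRawEncoding yields PackAttrMask (0x8) | Native << 1 (0x0) | 8 << 4
// (0x80) = 0x88, and getFromRawEncoding(0x88) restores the same state.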
bool IsPackAttr() const { return PackAttr; }
bool IsAlignAttr() const { return !PackAttr; }
Mode getAlignMode() const { return AlignMode; }
unsigned getPackNumber() const { return PackNumber; }
bool IsPackSet() const {
// #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
// attribute on a decl.
return PackNumber != UninitPackVal && PackNumber != 0;
}
bool IsXLStack() const { return XLStack; }
bool operator==(const AlignPackInfo &Info) const {
return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
Info.XLStack);
}
bool operator!=(const AlignPackInfo &Info) const {
return !(*this == Info);
}
private:
/// \brief True if this is a pragma pack attribute,
/// not a pragma align attribute.
bool PackAttr;
/// \brief The alignment mode that is in effect.
Mode AlignMode;
/// \brief The pack number of the stack.
unsigned char PackNumber;
/// \brief True if it is a XL #pragma align/pack stack.
bool XLStack;
/// \brief Uninitialized pack value.
static constexpr unsigned char UninitPackVal = -1;
// Masks to encode and decode an AlignPackInfo.
static constexpr uint32_t IsXLMask{0x0000'0001};
static constexpr uint32_t AlignModeMask{0x0000'0006};
static constexpr uint32_t PackAttrMask{0x0000'0008};
static constexpr uint32_t PackNumMask{0x0000'01F0};
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
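// Illustrative mapping from pragmas to Act() calls (assumed from the
// PragmaMsStackAction flags above, not verbatim from the parser):
//   #pragma <name>(push, 8) -> Act(Loc, PSK_Push_Set, "", 8)
//   #pragma <name>(pop)     -> Act(Loc, PSK_Pop, "", <unused>)
//   #pragma <name>()        -> Act(Loc, PSK_Reset, "", <unused>)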
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FpPragmaStack.CurrentValue;
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
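// Typical push/pop discipline (sketch inferred from the API above; Pool is
// a hypothetical local):
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   auto State = S.DelayedDiagnostics.push(Pool);
//   ...parse; access/deprecation diagnostics collect in Pool...
//   S.DelayedDiagnostics.popWithoutEmitting(State);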
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
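// Usage sketch (added; NewDC is a hypothetical DeclContext*):
//   {
//     ContextRAII Ctx(S, NewDC);  // S.CurContext == NewDC from here
//     ...process declarations inside NewDC...
//   }                             // destructor restores the saved state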
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// In addition of being constant evaluated, the current expression
/// occurs in an immediate function context - either a consteval function
/// or a consteval if function.
ImmediateFunctionContext,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
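// Quick reference (added examples derived from the enumerator comments):
//   sizeof(f())            -> Unevaluated: f is not odr-used
//   case 1 + 2:            -> ConstantEvaluated
//   body of a consteval fn -> ImmediateFunctionContext
//   int x = g();           -> PotentiallyEvaluated
//   void h(int n = k());   -> PotentiallyEvaluatedIfUsed for k()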
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated ||
Context == ExpressionEvaluationContext::ImmediateFunctionContext;
}
bool isImmediateFunctionContext() const {
return Context == ExpressionEvaluationContext::ImmediateFunctionContext;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
const TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
class GlobalMethodPool {
public:
using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
using iterator = llvm::DenseMap<Selector, Lists>::iterator;
iterator begin() { return Methods.begin(); }
iterator end() { return Methods.end(); }
iterator find(Selector Sel) { return Methods.find(Sel); }
std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
return Methods.insert(Val);
}
int count(Selector Sel) const { return Methods.count(Sel); }
bool empty() const { return Methods.empty(); }
private:
llvm::DenseMap<Selector, Lists> Methods;
};
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
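/// Example (illustrative sketch): looking up both method lists for a
/// selector. The assumption here is that the pair's first list holds
/// instance methods and the second holds factory methods:
///
///   GlobalMethodPool::iterator It = MethodPool.find(Sel);
///   if (It != MethodPool.end()) {
///     ObjCMethodList &Instance = It->second.first;
///     ObjCMethodList &Factory = It->second.second;
///   }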
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
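/// Example (illustrative sketch of the re-entrancy guard; this RAII-free
/// form is for exposition only):
///
///   SpecialMemberDecl Key(RD, CXXDefaultConstructor);
///   if (!SpecialMembersBeingDeclared.insert(Key).second)
///     return; // declaration already in progress; act as if undeclared
///   // ... declare the implicit member ...
///   SpecialMembersBeingDeclared.erase(Key);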
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
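/// Example (illustrative sketch): instantiated at the start of a compound
/// statement so floating-point state changed by pragmas inside the statement
/// is restored on exit:
///
///   {
///     FPFeaturesStateRAII SaveFP(*this); // snapshots CurFPFeatures
///     // ... act on the compound statement body ...
///   } // destructor restores CurFPFeatures and the pragma stack value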
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform);
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
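/// Example (illustrative sketch; 'visitChildren' is a hypothetical deeply
/// recursive helper):
///
///   runWithSufficientStackSpace(Loc, [&] { visitChildren(Node); });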
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
// in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
~ImmediateDiagBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First clear the diagnostic
// builder itself so it won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
return *this;
}
};
/// Bitmask to contain the list of reasons a single diagnostic should be
/// emitted, based on its language. This permits multiple offload systems
/// to coexist in the same translation unit.
enum class DeviceDiagnosticReason {
/// Diagnostic doesn't apply to anything. Included for completeness, but
/// should make this a no-op.
None = 0,
/// OpenMP specific diagnostic.
OmpDevice = 1 << 0,
OmpHost = 1 << 1,
OmpAll = OmpDevice | OmpHost,
/// CUDA specific diagnostics.
CudaDevice = 1 << 2,
CudaHost = 1 << 3,
CudaAll = CudaDevice | CudaHost,
/// SYCL specific diagnostic.
Sycl = 1 << 4,
/// ESIMD specific diagnostic.
Esimd = 1 << 5,
/// A flag representing 'all'. This can be used to avoid the check
/// altogether and make this behave as it did before the
/// DiagnosticReason was added (that is, unconditionally emit).
/// Note: This needs to be updated if any flags above are added.
All = OmpAll | CudaAll | Sycl | Esimd,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/All)
};
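/// Example (illustrative sketch): LLVM_MARK_AS_BITMASK_ENUM enables the
/// usual bitwise operators, so reasons compose and test naturally:
///
///   DeviceDiagnosticReason R =
///       DeviceDiagnosticReason::CudaDevice | DeviceDiagnosticReason::Sycl;
///   if ((R & DeviceDiagnosticReason::Sycl) != DeviceDiagnosticReason::None)
///     ; // the diagnostic applies to SYCL device code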
private:
// A collection of pairs of undefined functions and their callers known
// to be reachable from a routine on the device (kernel or device function).
typedef std::pair<const FunctionDecl *, const FunctionDecl *> CallPair;
llvm::SmallVector<CallPair> UndefinedReachableFromSyclDevice;
public:
// Helper routine to add a Callee-Caller pair of FunctionDecl *
// to UndefinedReachableFromSyclDevice.
void addFDToReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
UndefinedReachableFromSyclDevice.push_back(std::make_pair(Callee, Caller));
}
// Helper routine to check whether a Callee-Caller pair of FunctionDecl *
// is in UndefinedReachableFromSyclDevice.
bool isFDReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
return llvm::any_of(UndefinedReachableFromSyclDevice,
[Callee, Caller](const CallPair &P) {
return P.first == Callee && P.second == Caller;
});
}
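/// Example (illustrative sketch): recording a call edge when the callee is
/// found undefined on the device, then querying it later:
///
///   addFDToReachableFromSyclDevice(Callee, Caller);
///   // ... later, during deferred-diagnostic emission ...
///   if (isFDReachableFromSyclDevice(Callee, Caller))
///     ; // diagnose the undefined callee against this caller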
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S, DeviceDiagnosticReason R);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second
<< std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second.AddFixItHint(
Hint);
}
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Is the last error-level diagnostic immediate. This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
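/// Example (illustrative sketch; the diagnostic ID is hypothetical): because
/// SemaDiagnosticBuilder converts to the error results above, a check can
/// diagnose and bail out in a single statement:
///
///   if (BadExpr)
///     return Diag(Loc, diag::err_some_check) << BadExpr; // yields ExprError()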
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;
/// RAII class to control scope of DeferDiags.
class DeferDiagsRAII {
Sema &S;
bool SavedDeferDiags = false;
public:
DeferDiagsRAII(Sema &S, bool DeferDiags)
: S(S), SavedDeferDiags(S.DeferDiags) {
S.DeferDiags = DeferDiags;
}
~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};
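/// Example (illustrative sketch): force deferral for a region of semantic
/// analysis, restoring the previous setting automatically:
///
///   {
///     DeferDiagsRAII Defer(*this, /*DeferDiags=*/true);
///     // ... diagnostics emitted here may be deferred ...
///   } // previous DeferDiags value restored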
/// Whether an uncompilable error has occurred. This includes errors that
/// happen in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
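/// Example (illustrative sketch): the custom deleter lets a popped scope
/// outlive the pop long enough for late diagnostics to inspect it:
///
///   {
///     PoppedFunctionScopePtr Scope = PopFunctionScopeInfo();
///     // ... analysis-based warnings may still read Scope.get() ...
///   } // PoppedFunctionScopeDeleter reclaims the scope here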
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
SYCLIntelFPGALoopCountAttr *
BuildSYCLIntelFPGALoopCountAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *
BuildSYCLIntelFPGAInitiationIntervalAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAMaxConcurrencyAttr *
BuildSYCLIntelFPGAMaxConcurrencyAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAMaxInterleavingAttr *
BuildSYCLIntelFPGAMaxInterleavingAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGASpeculatedIterationsAttr *
BuildSYCLIntelFPGASpeculatedIterationsAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGALoopCoalesceAttr *
BuildSYCLIntelFPGALoopCoalesceAttr(const AttributeCommonInfo &CI, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH file, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
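/// Example (illustrative sketch; the diagnostic ID and extra argument are
/// hypothetical): the variadic overload forwards Args into a
/// BoundTypeDiagnoser so they are streamed before the type itself:
///
///   if (RequireCompleteType(Loc, T, diag::err_incomplete_foo, Range))
///     return true; // diagnostic already emitted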
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
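/// Example (illustrative sketch): the factory functions keep the tagged
/// union and its discriminator in sync; consumers switch on getKind()
/// before touching a member:
///
///   NameClassification C = NameClassification::TypeTemplate(TName);
///   if (C.getKind() == NC_TypeTemplate)
///     TemplateName TN = C.getTemplateName(); // asserts the matching kind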
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
void warnOnReservedIdentifier(const NamedDecl *D);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
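// Illustrative sketch: the parser is expected to consult this before
// discarding a body. `Actions`, `DeclRes`, and the control flow here are
// hypothetical stand-ins for the real parser-side machinery.
// \code
//   if (Actions.canSkipFunctionBody(DeclRes))
//     return Actions.ActOnSkippedFunctionBody(DeclRes);
// \endcode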
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
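// Illustrative sketch: dispatching on the kind of a defaulted function.
// `FD` is an assumed FunctionDecl*, and the two handlers are hypothetical
// helpers named only for the example.
// \code
//   if (DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD)) {
//     if (DFK.isSpecialMember())
//       handleSpecialMember(DFK.asSpecialMember());   // hypothetical helper
//     else
//       handleComparison(DFK.asComparison());         // hypothetical helper
//   }
// \endcode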
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject or error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
/// Merge availability attributes for an implementation of
/// an optional protocol requirement.
AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
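// Worked example of the priority arithmetic described above: an attribute
// inferred for one platform (AP_InferredFromOtherPlatform == 2) from an
// attribute that was injected via '#pragma clang attribute'
// (AP_PragmaClangAttribute == 1) has final priority 2 + 1 == 3, so it can
// never displace an attribute written explicitly on the declaration
// (AP_Explicit == 0).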
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef NewUserDiagnostic);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
CCEK_Noexcept ///< Condition in a noexcept(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
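// Illustrative sketch: a minimal converter for a context that needs an
// integral value. Only two of the pure-virtual diagnostic hooks are shown;
// the remaining overrides follow the same pattern. The diagnostic IDs are
// placeholders, not real entries in the Sema diagnostic tables.
// \code
//   struct IntegralConverter : ICEConvertDiagnoser {
//     IntegralConverter()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_placeholder_not_integral) << T;
//     }
//     SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
//                                              QualType T) override {
//       return S.Diag(Loc, diag::err_placeholder_incomplete) << T;
//     }
//     // ... diagnoseExplicitConv, noteExplicitConv, diagnoseAmbiguous,
//     // noteAmbiguous, and diagnoseConversion are overridden likewise.
//   } Converter;
//   ExprResult Res = PerformContextualImplicitConversion(Loc, E, Converter);
// \endcode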
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
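// For reference, the attribute being checked looks like this in source
// (standard Clang 'enable_if' syntax; the function itself is made up):
// \code
//   void resize(int n) __attribute__((enable_if(n > 0, "n must be positive")));
// \endcode
// CheckEnableIf evaluates each such condition against the actual call
// arguments and returns the first attribute whose condition fails.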
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
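// For reference, the two flavors distinguished above (standard Clang
// 'diagnose_if' syntax; the declarations themselves are made up):
// \code
//   // Argument-dependent: rechecked at each direct call.
//   int div(int a, int b)
//       __attribute__((diagnose_if(b == 0, "division by zero", "error")));
//   // Argument-independent: checked on every use of the function.
//   void legacy() __attribute__((diagnose_if(1, "legacy API", "warning")));
// \endcode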
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
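// Illustrative sketch of the unqualified entry point described above.
// `Name`, `NameLoc`, and `CurScope` are assumed from the enclosing code.
// \code
//   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName);
//   if (LookupName(R, CurScope) && R.isSingleResult())
//     NamedDecl *Found = R.getFoundDecl();
// \endcode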
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
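// Illustrative sketch: the typical redeclaration-lookup pattern using the
// helper above. `Name`, `NameLoc`, and `S` are assumed from the enclosing
// code.
// \code
//   LookupResult Previous(*this, Name, NameLoc, LookupOrdinaryName,
//                         forRedeclarationInCurContext());
//   LookupName(Previous, S);
// \endcode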
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
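// Worked example for the literal 123_km: a cooked result binds to
// operator""_km(unsigned long long), a raw result binds to
// operator""_km(const char *) receiving "123", and a template result
// instantiates operator""_km<'1','2','3'>(). (Standard C++ user-defined
// literal behavior, shown here only to illustrate the enumerators.)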
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of function emission based on the CUDA/HIP/OpenMP host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
DeviceDiagnosticReason getEmissionReason(const FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
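// Illustrative sketch: checking an expression after parsing, degrading any
// uncorrectable typos to RecoveryExprs instead of failing outright. `E` is
// an assumed Expr* from the enclosing code.
// \code
//   ExprResult Checked = CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/true);
//   if (Checked.isInvalid())
//     return ExprError();
// \endcode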
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Process the attributes before creating an attributed statement,
/// populating \p OutAttrs with the semantic attributes that were processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration exactly matches that of its interface
/// declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property
/// setter/getter and its property has a backing ivar, returns this ivar;
/// otherwise, returns NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, warning each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the parameter \p CheckTheOther is set, it then checks the other kind. If
/// no such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// construction or destruction.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
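// Usage sketch (assuming a Sema &S and hypothetical Expr *CondExpr and
// Expr *IncExpr): wrap an expression as a full-expression before handing it
// to a statement builder; MakeFullDiscardedValueExpr is the variant for
// expressions whose value is discarded, such as the increment of a for
// statement:
//
//   Sema::FullExprArg Cond = S.MakeFullExpr(CondExpr);
//   Sema::FullExprArg Inc  = S.MakeFullDiscardedValueExpr(IncExpr);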
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
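// Usage sketch (assuming a Sema &S in scope): bracket the analysis of a
// compound statement so the scope is popped even on early return:
//
//   {
//     Sema::CompoundScopeRAII BodyScope(S);
//     // ... act on the statements of the compound statement ...
//   } // ActOnFinishOfCompoundStmt() runs here.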
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
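// Usage sketch (illustrative): pop the function scope automatically on
// error paths, and call disable() once responsibility for popping the
// scope passes elsewhere:
//
//   Sema::FunctionScopeRAII ScopeGuard(S);
//   if (ParseFailed)
//     return StmtError(); // scope popped here by the guard
//   ScopeGuard.disable(); // success: leave the scope for a later pop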
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
struct NamedReturnInfo {
const VarDecl *Candidate;
enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
Status S;
bool isMoveEligible() const { return S != None; }
bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
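// Example (illustrative): for `return x;` where `x` is a local variable
// satisfying the NRVO conditions, getNamedReturnInfo yields Candidate == x
// with S == MoveEligibleAndCopyElidable, so both isMoveEligible() and
// isCopyElidable() return true; a move-eligible but non-elidable case
// (e.g. returning a by-value parameter) yields S == MoveEligible.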
enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
NamedReturnInfo getNamedReturnInfo(
Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType);
ExprResult
PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const NamedReturnInfo &NRInfo, Expr *Value,
bool SuppressSimplerImplicitMoves = false);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo,
bool SuppressSimplerImplicitMoves);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If \p D is a file-scoped declaration that should be diagnosed when
/// unused, keep track of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// If \p VD (a parameter or a variable) is set but not otherwise used,
/// diagnose.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
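// Usage sketch: push an undelayed-diagnostics context for the duration of
// a class body and restore the previous state when parsing completes:
//
//   Sema::ParsingClassState State = S.PushParsingClass();
//   // ... parse the members of the class ...
//   S.PopParsingClass(State);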
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD.
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
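// Usage sketch (hypothetical caller): probe whether `Var` could be captured
// here without actually adding the capture or emitting diagnostics, by
// passing BuildAndDiagnose = false:
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);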
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the statements' reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
const PartialDiagnostic &PD);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified
/// statements are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
ParsedType ParsedTy);
ExprResult BuildSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
ExprResult ActOnSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for an OpenMP iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
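// Example (illustrative): for the OpenMP modifier `iterator(int i = 0:N:2)`,
// DeclIdent names `i`, Type is `int`, and Range carries the begin (0), end
// (N), and optional step (2) expressions; AssignLoc, ColonLoc, and
// SecColonLoc locate the '=' and the two ':' tokens.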
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
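// Example (illustrative): given `p.field` where `p` is of a class type
// that defines operator->, member lookup for `field` on `p` fails;
// BuildMemberReferenceExpr can use these extra arguments to reinvoke
// ActOnMemberAccess as if the user had written `p->field`.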
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
MultiExprArg CallArgs);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
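// Example (illustrative): __builtin_offsetof(T, a.b[42]) is represented by
// three components: an identifier component for `a` (isBrackets false,
// U.IdentInfo set), an identifier component for `b`, and a bracket
// component whose U.E is the index expression 42.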
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
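// Example (illustrative): in the Microsoft extension
// `__if_exists (N::foo) { ... }`, the guarded block is entered when the
// check yields IER_Exists; inside a template, a dependent name yields
// IER_Dependent and the decision is deferred to instantiation.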
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block literal,
/// e.g. ^(int x){...}, has been successfully completed.
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc,
const LookupResult *R = nullptr,
const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation,
bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling the destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then comes throw(collected exceptions).
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
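// Illustrative sketch (not part of the original header): a caller computing
// the implicit exception specification of a defaulted special member might
// drive this collector as follows, with `SemaRef`, `Loc`, and
// `CalledMethods` standing in for real locals:
//
//   ImplicitExceptionSpecification ExceptSpec(SemaRef);
//   for (const CXXMethodDecl *MD : CalledMethods)
//     ExceptSpec.CalledDecl(Loc, MD);
//   FunctionProtoType::ExceptionSpecInfo ESI = ExceptSpec.getExceptionSpec();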
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
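// Hedged usage sketch for MaybeBindToTemporary (`E` is a hypothetical Expr*
// of record type): the result is the original expression unless a
// CXXBindTemporaryExpr is needed to run the non-trivial destructor.
//
//   ExprResult Bound = MaybeBindToTemporary(E);
//   if (Bound.isInvalid())
//     return ExprError();
//   E = Bound.get();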
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
// Checks whether the vector type should be initialized from a scalar
// by splatting the value rather than populating a single element.
// This is the case for AltiVecVector types as well as with
// AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);
// Checks if the -faltivec-src-compat=gcc option is specified.
// If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
// treated the same way as they are when trying to initialize
// these vectors on gcc (an error is emitted).
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
QualType SrcTy);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
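// For reference, the source-level fold-expression forms handled above
// (C++17 [expr.prim.fold]; `args` is a function parameter pack):
//
//   (args + ...)         // unary right fold
//   (... + args)         // unary left fold
//   (args + ... + init)  // binary right fold
//   (init + ... + args)  // binary left fold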
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
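// Illustrative sketch (`SemaRef`, `RD`, and `Quals` are hypothetical):
// temporarily permitting 'this' while analyzing a declaration outside a
// member function body:
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, Quals);
//     // ... build expressions that may reference 'this' ...
//   } // the previous CXXThisTypeOverride is restored here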
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
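// Hedged sketch of a lookup for a non-placement, non-array class-type 'new'
// (all locals hypothetical; error handling elided):
//
//   FunctionDecl *OperatorNew = nullptr, *OperatorDelete = nullptr;
//   bool PassAlignment = false;
//   if (FindAllocationFunctions(StartLoc, Range, AFS_Both, AFS_Both,
//                               AllocType, /*IsArray=*/false, PassAlignment,
//                               /*PlaceArgs=*/MultiExprArg(), OperatorNew,
//                               OperatorDelete))
//     return ExprError();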
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the array type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
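// Illustrative construction (hypothetical `II`, `IdLoc`, and `CCLoc`): the
// parser packages the pieces of 'identifier::' into a NestedNameSpecInfo
// before calling the Act* entry points declared below:
//
//   NestedNameSpecInfo IdInfo(II, IdLoc, CCLoc);
//   if (ActOnCXXNestedNameSpecifier(S, IdInfo, EnteringContext, SS))
//     return true; // error already diagnosed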
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in that case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to true
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture, performing any
/// implicit conversions (such as an lvalue-to-rvalue conversion) when the
/// capture is not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
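// Hedged sketch of the init-capture pipeline formed by the three entry
// points above (locals hypothetical; a null QualType from the first call
// signals an error, handling elided):
//
//   QualType CaptureType = buildLambdaInitCaptureInitialization(
//       Loc, ByRef, EllipsisLoc, None, Id, DirectInit, Init);
//   VarDecl *Var = createLambdaInitCaptureVarDecl(
//       Loc, CaptureType, EllipsisLoc, Id, InitStyle, Init);
//   addInitCapture(LSI, Var);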
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; IR generation
/// synthesizes the real body of the function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2 but would
/// have been, had a pair of the atomic constraints involved been declared in
/// a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
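// Hedged usage sketch: the usual check-then-diagnose pattern for a trailing
// requires-clause, assuming ConstraintSatisfaction exposes an IsSatisfied
// flag (`FD` and `UsageLoc` are hypothetical):
//
//   ConstraintSatisfaction Satisfaction;
//   if (CheckFunctionConstraints(FD, Satisfaction, UsageLoc))
//     return true; // error; satisfaction could not be determined
//   if (!Satisfaction.IsSatisfied) {
//     DiagnoseUnsatisfiedConstraint(Satisfaction);
//     return true;
//   }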
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
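///
/// Illustrative only (the variable names are hypothetical): a lookup may
/// require the 'template' keyword either conditionally or unconditionally:
/// \code
/// RequiredTemplateKind IfKeyword(TemplateKWLoc);       // required iff TemplateKWLoc is valid
/// RequiredTemplateKind Always(TemplateNameIsRequired); // unconditionally required
/// \endcode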
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
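///
/// For reference, a deduction-guide declaration has the form (sketch):
/// \code
/// template<typename T> struct Container { Container(T); };
/// template<typename T> Container(T) -> Container<T>; // deduction guide
/// \endcode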
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc,
bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
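///
/// As an illustration of the conversion step (example, not from this header):
/// given
/// \code
/// template<int N> struct A;
/// A<'a'> a; // the argument 'a' is converted to the parameter type 'int'
/// \endcode
/// the converted argument list records the value as an \c int.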
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
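///
/// A minimal sketch of the construct being parsed:
/// \code
/// template<typename T> void g() {
///   typename T::type v; // 'typename T::type' is handled here
/// }
/// \endcode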
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
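// For orientation, these entry points handle constructs such as the
// following (illustrative; std::convertible_to assumes <concepts>):
//
//   template<typename T>
//   concept Addable = requires(T a, T b) {
//     { a + b } -> std::convertible_to<T>;  // compound requirement
//     typename T::value_type;               // type requirement
//     requires sizeof(T) > 0;               // nested requirement
//   };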
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
// A requirement in a requires-expression.
UPPC_Requirement,
// A requires-clause.
UPPC_RequiresClause,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
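///
/// A typical diagnosed case (sketch):
/// \code
/// template<typename... Ts> struct Tuple;
/// template<typename... Ts> void g() {
///   Tuple<Ts> t; // error: 'Ts' is an unexpanded parameter pack
/// }
/// \endcode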
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
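///
/// For example (sketch):
/// \code
/// template<typename... Ts> struct Tuple;
/// template<typename... Us> using Alias = Tuple<Us...>; // 'Us...' parsed here
/// \endcode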
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
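///
/// The mismatched-length failure mode, for illustration (assuming
/// \c std::pair from \c <utility>):
/// \code
/// template<typename... Ts, typename... Us>
/// void f(std::pair<Ts, Us>...); // error at expansion if 'Ts' and 'Us'
///                               // deduce to different lengths
/// \endcode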
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
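///
/// The construct in question, for reference:
/// \code
/// template<typename... Ts> struct S {
///   static constexpr unsigned N = sizeof...(Ts); // computed without expanding Ts
/// };
/// \endcode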
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
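///
/// For instance (illustrative):
/// \code
/// // TypeWithAuto   Replacement   result
/// // auto&&         int           int&&
/// // auto*          int           int*
/// \endcode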
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
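///
/// Hypothetical usage (sketch):
/// \code
/// {
///   ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, 0);
///   // ... substitute using element 0 of each parameter pack ...
/// } // previous index restored on scope exit
/// \endcode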
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
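///
/// Typical usage pattern (sketch, assuming a Sema member function):
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return; // instantiation depth exceeded; error already diagnosed
/// \endcode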
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
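///
/// Example check (sketch only):
/// \code
///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext())
///     ; // in a SFINAE context; *Info, when non-null, collects the
///       // suppressed diagnostics
/// \endcode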
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
bool isImmediateFunctionContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
for (const ExpressionEvaluationContextRecord &context :
llvm::reverse(ExprEvalContexts)) {
if (context.isImmediateFunctionContext())
return true;
if (context.isUnevaluated())
return false;
}
return false;
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
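///
/// Usage sketch (\c E and \c TemplateArgs are hypothetical operands):
/// \code
///   SFINAETrap Trap(*this);
///   ExprResult R = SubstExpr(E, TemplateArgs);
///   if (Trap.hasErrorOccurred())
///     ; // treat as a substitution failure rather than a hard error
/// \endcode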
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
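///
/// Usage sketch:
/// \code
///   {
///     TentativeAnalysisScope Tentative(*this);
///     // ... provisional analysis; typo correction is disabled here.
///   } // prior typo-correction and diagnostic state is restored
/// \endcode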
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
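/// RAII object that saves the global sets of pending implicit template
/// instantiations and vtable uses so that, when enabled, they can be
/// performed eagerly via perform(); the destructor restores (or merges
/// back) the saved sets.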
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
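/// RAII object that saves the queue of pending local implicit
/// instantiations so that perform() handles only instantiations added in
/// the current local scope; the saved queue is restored on destruction.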
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
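///
/// Usage sketch (\c EPI and \c NumParams are placeholders; the noescape
/// flag is just an example):
/// \code
///   ExtParameterInfoBuilder InfoBuilder;
///   InfoBuilder.set(
///       0, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
///   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);
/// \endcode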
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
/// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
template <typename AttrType>
void addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr, Expr *ZDimExpr);
void AddWorkGroupSizeHintAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDim, Expr *YDim, Expr *ZDim);
WorkGroupSizeHintAttr *
MergeWorkGroupSizeHintAttr(Decl *D, const WorkGroupSizeHintAttr &A);
void AddIntelReqdSubGroupSize(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelReqdSubGroupSizeAttr *
MergeIntelReqdSubGroupSizeAttr(Decl *D, const IntelReqdSubGroupSizeAttr &A);
IntelNamedSubGroupSizeAttr *
MergeIntelNamedSubGroupSizeAttr(Decl *D, const IntelNamedSubGroupSizeAttr &A);
void AddSYCLIntelNumSimdWorkItemsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNumSimdWorkItemsAttr *
MergeSYCLIntelNumSimdWorkItemsAttr(Decl *D,
const SYCLIntelNumSimdWorkItemsAttr &A);
void AddSYCLIntelESimdVectorizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelESimdVectorizeAttr *
MergeSYCLIntelESimdVectorizeAttr(Decl *D,
const SYCLIntelESimdVectorizeAttr &A);
void AddSYCLIntelSchedulerTargetFmaxMhzAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelSchedulerTargetFmaxMhzAttr *MergeSYCLIntelSchedulerTargetFmaxMhzAttr(
Decl *D, const SYCLIntelSchedulerTargetFmaxMhzAttr &A);
void AddSYCLIntelNoGlobalWorkOffsetAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNoGlobalWorkOffsetAttr *MergeSYCLIntelNoGlobalWorkOffsetAttr(
Decl *D, const SYCLIntelNoGlobalWorkOffsetAttr &A);
void AddSYCLIntelLoopFuseAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelLoopFuseAttr *
MergeSYCLIntelLoopFuseAttr(Decl *D, const SYCLIntelLoopFuseAttr &A);
void AddIntelFPGAPrivateCopiesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGAMaxReplicatesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAMaxReplicatesAttr *
MergeIntelFPGAMaxReplicatesAttr(Decl *D, const IntelFPGAMaxReplicatesAttr &A);
void AddIntelFPGAForcePow2DepthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAForcePow2DepthAttr *
MergeIntelFPGAForcePow2DepthAttr(Decl *D,
const IntelFPGAForcePow2DepthAttr &A);
void AddSYCLIntelFPGAInitiationIntervalAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *MergeSYCLIntelFPGAInitiationIntervalAttr(
Decl *D, const SYCLIntelFPGAInitiationIntervalAttr &A);
SYCLIntelFPGAMaxConcurrencyAttr *MergeSYCLIntelFPGAMaxConcurrencyAttr(
Decl *D, const SYCLIntelFPGAMaxConcurrencyAttr &A);
void AddSYCLIntelMaxGlobalWorkDimAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelMaxGlobalWorkDimAttr *
MergeSYCLIntelMaxGlobalWorkDimAttr(Decl *D,
const SYCLIntelMaxGlobalWorkDimAttr &A);
void AddIntelFPGABankWidthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGABankWidthAttr *
MergeIntelFPGABankWidthAttr(Decl *D, const IntelFPGABankWidthAttr &A);
void AddIntelFPGANumBanksAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGANumBanksAttr *
MergeIntelFPGANumBanksAttr(Decl *D, const IntelFPGANumBanksAttr &A);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
/// AddSYCLIntelFPGAMaxConcurrencyAttr - Adds a max_concurrency attribute to a
/// particular declaration.
void AddSYCLIntelFPGAMaxConcurrencyAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
bool checkAllowedSYCLInitializer(VarDecl *VD,
bool CheckValueDependent = false);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
struct DeclareTargetContextInfo {
struct MapInfo {
OMPDeclareTargetDeclAttr::MapTypeTy MT;
SourceLocation Loc;
};
/// Explicitly listed variables and functions in a 'to' or 'link' clause.
llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
/// The 'device_type' as parsed from the clause.
OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
/// The directive location.
SourceLocation Loc;
DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
: Kind(Kind), Loc(Loc) {}
};
/// Stack of nested '#pragma omp declare target' directive contexts.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true,
bool SuppressExprDiags = false);
/// Returns the OpenMP nesting level for the current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested inside the \p NumLoops loops.
/// \param OriginalInits [out] Collection of statements and declarations that
/// must have been executed/declared before entering the
/// loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
Stmt *&Body,
SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
&OriginalInits);
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
return OMPDeclareVariantScopes.empty() ? nullptr
: OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Can we exit an OpenMP declare variant scope at the moment?
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle an `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle an `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on well-formed '\#pragma omp metadirective' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
// OpenMP directives and clauses.
/// Called on a correct id-expression from '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<std::string> Assumptions,
bool SkippedClauses);
/// Check if there is an active global `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
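// A hedged sketch of how the assumption directives above pair up in source
// ('no_openmp_routines' is one of the standard assumption clauses;
// 'pure_math' is hypothetical):
//
//   #pragma omp begin assumes no_openmp_routines
//   void pure_math(double *x, int n);
//   #pragma omp end assumes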
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
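// The Start/Combiner/Initializer/End hooks above are invoked roughly in parse
// order over the pieces of the pragma. A hedged sketch of the source form
// ('merge' and 'T' are hypothetical):
//
//   #pragma omp declare reduction(merge : T : omp_out = omp_out + omp_in) \
//       initializer(omp_priv = T())
//
// The combiner expression (using omp_in/omp_out) is seen between the Combiner
// hooks, and the initializer (using omp_priv) between the Initializer hooks.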
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
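// A hedged sketch of the declare mapper construct handled above ('vec' and
// 'v' are hypothetical):
//
//   struct vec { int len; double *data; };
//   #pragma omp declare mapper(struct vec v) map(v, v.data[0:v.len])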
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a target context is completed, which can be when a
/// '#pragma omp end declare target' was encountered or when a
/// '#pragma omp declare target' without a declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on a correct id-expression from the '#pragma omp declare target'
/// directive.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true when inside an OpenMP 'declare target' region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
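// A hedged sketch of the declare target region this predicate refers to
// ('device_fn' is hypothetical):
//
//   #pragma omp declare target
//   int device_fn(int x); // compiled for the target device as well
//   #pragma omp end declare target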
/// Return true when inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// Process a canonical OpenMP loop nest that can either be a canonical
/// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
/// OpenMP loop transformation construct.
StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
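// A hedged sketch of the typical calling sequence for these region hooks
// (locals are hypothetical; the real driver lives in the OpenMP parser):
//
//   ActOnOpenMPRegionStart(DKind, CurScope);        // open captured region(s)
//   StmtResult Body = /* parse associated stmt */ nullptr;
//   StmtResult Region = ActOnOpenMPRegionEnd(Body, Clauses);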
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
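// Hedged sketches of the loop transformations handled above ('work', 'n',
// and 'm' are hypothetical):
//
//   #pragma omp tile sizes(4, 8)
//   for (int i = 0; i < n; ++i)
//     for (int j = 0; j < m; ++j)
//       work(i, j);
//
//   #pragma omp unroll partial(4)
//   for (int i = 0; i < n; ++i)
//     work(i, 0);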
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
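// A hedged sketch of a declare simd annotation ('saxpy' is hypothetical):
//
//   #pragma omp declare simd simdlen(8) uniform(a) linear(i : 1)
//   float saxpy(float a, float x, float y, int i);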
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \param NumAppendArgs The number of omp_interop_t arguments to account for
/// in checking.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, unsigned NumAppendArgs,
SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
/// \param AdjustArgsNothing The list of 'nothing' arguments.
/// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
/// \param AppendArgs The list of 'append_args' arguments.
/// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
/// \param AppendArgsLoc The Location of an 'append_args' clause.
/// \param SR The SourceRange of the 'declare variant' directive.
void ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
ArrayRef<Expr *> AdjustArgsNothing,
ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
SourceRange SR);
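// A hedged sketch of the declare variant mechanism ('base' and 'base_gpu'
// are hypothetical):
//
//   void base_gpu(int *p);
//   #pragma omp declare variant(base_gpu) match(device = {kind(gpu)})
//   void base(int *p); // calls in gpu contexts are redirected to base_gpu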
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
bool IsTarget, bool IsTargetSync,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
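// A hedged sketch of a reduction clause on a worksharing loop ('a' and 'n'
// are hypothetical):
//
//   double sum = 0.0;
//   #pragma omp parallel for reduction(+ : sum)
//   for (int i = 0; i < n; ++i)
//     sum += a[i];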
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *ActOnOpenMPMapClause(
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, bool NoDiagnose = false,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
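// A hedged sketch of map types and array sections on a target construct
// ('a', 'n', and 'sum' are hypothetical):
//
//   #pragma omp target map(to : a[0:n]) map(tofrom : sum)
//   for (int i = 0; i < n; ++i)
//     sum += a[i];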
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_PRValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK = CCK_ImplicitConversion);
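// A hedged sketch of a typical call from inside Sema ('E' is a hypothetical
// expression known to have integral type):
//
//   ExprResult R = ImpCastExprToType(E, Context.DoubleTy,
//                                    CK_IntegralToFloating);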
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
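// A hedged sketch of the statement form being validated ('f' is a
// hypothetical function with a matching signature):
//
//   int caller(int x) { [[clang::musttail]] return f(x); }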
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
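// The classic effect of the usual arithmetic conversions, as a small
// illustration in the analyzed source language:
//
//   int i = 3; double d = 0.5;
//   double r = i + d; // i is converted to double first, so r is 3.5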
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright; it is invalid to
/// represent it in the AST.
Incompatible
};
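// Hedged source-level illustrations of a few enumerators above:
//
//   int n = 0; int *p = n;  // IntToPointer (accepted as an extension)
//   int m = p;              // PointerToInt (accepted as an extension)
//   unsigned *q = p;        // IncompatiblePointerSign
//   char **pp = 0; const char **cc = pp; // IncompatibleNestedPointerQualifiers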
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
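// A hedged C++ illustration of the composite pointer type computed here
// ('Base' and 'Derived' are hypothetical, with Derived inheriting from Base):
//
//   struct Base {}; struct Derived : Base {};
//   Base *b = nullptr; Derived *d = nullptr; bool cond = true;
//   auto *r = cond ? b : d; // r has the composite pointer type Base *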
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
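/// Illustrative sketch (added for exposition, not part of the original
/// header): given the hypothetical types
///   struct Base {}; struct Derived : Base {};
/// comparing T1 = 'const Base' with T2 = 'Derived' yields roughly
/// Ref_Compatible (a derived-to-base plus qualification conversion suffices),
/// T1 = 'Base' with T2 = 'const Derived' yields Ref_Related (binding would
/// drop const), and T1 = 'int' with T2 = 'double' yields Ref_Incompatible.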
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions, i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or between vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
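/// Illustrative sketch (added for exposition; 'S', 'Loc' and 'CondExpr' are
/// hypothetical names, not part of this header):
///   ConditionResult Cond = S.ActOnCondition(
///       S.getCurScope(), Loc, CondExpr, Sema::ConditionKind::ConstexprIf);
///   if (Cond.isInvalid())
///     return StmtError();
///   if (llvm::Optional<bool> Known = Cond.getKnownValue())
///     ; // the 'if constexpr' condition folded to *Known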
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid ExprResult on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
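/// Illustrative sketch (added for exposition; 'S' and 'E' are hypothetical
/// names):
///   llvm::APSInt Value;
///   ExprResult Res = S.VerifyIntegerConstantExpression(E, &Value, Sema::AllowFold);
///   if (Res.isInvalid())
///     return true; // suitable diagnostics were already emitted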
/// VerifyBitField - Verifies that a bit-field width expression is an ICE and
/// has the correct width, and that the field type is valid.
/// Returns an invalid ExprResult on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
class DeviceDeferredDiagnostic {
public:
DeviceDeferredDiagnostic(SourceLocation SL, const PartialDiagnostic &PD,
DeviceDiagnosticReason R)
: Diagnostic(SL, PD), Reason(R) {}
PartialDiagnosticAt &getDiag() { return Diagnostic; }
DeviceDiagnosticReason getReason() const { return Reason; }
private:
PartialDiagnosticAt Diagnostic;
DeviceDiagnosticReason Reason;
};
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<DeviceDeferredDiagnostic>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the type is allowed to be used for the current target.
void checkTypeSupport(QualType Ty, SourceLocation Loc,
ValueDecl *D = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
enum CUDAVariableTarget {
CVT_Device, /// Emitted on device side with a shadow variable on host side
CVT_Host, /// Emitted on host side only
CVT_Both, /// Emitted on both sides with different addresses
CVT_Unified, /// Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
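/// Illustrative sketch (added for exposition): with a __host__ caller and a
/// __device__ callee, IdentifyCUDAPreference returns CFP_Never and
/// IsAllowedCUDACall therefore returns false; with a __host__ __device__
/// caller calling a __device__ callee while compiling for the host, it
/// returns CFP_WrongSide, which is allowed here but diagnosed later if the
/// caller is actually codegen'ed.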
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// A CUDA lambda is by default a host device function unless it has an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error, emits an appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
enum class AttributeCompletion {
Attribute,
Scope,
None,
};
void CodeCompleteAttribute(
AttributeCommonInfo::Syntax Syntax,
AttributeCompletion Completion = AttributeCompletion::Attribute,
const IdentifierInfo *Scope = nullptr);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
void CheckSYCLKernelCall(FunctionDecl *CallerFunc, SourceRange CallLoc,
ArrayRef<const Expr *> Args);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinArithmeticFence(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
bool SemaBuiltinElementwiseMath(CallExpr *TheCall);
bool SemaBuiltinElementwiseMathOneArg(CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
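/// Illustrative sketch (added for exposition) of the user-facing feature
/// these hooks implement, roughly following the clang documentation for
/// MPI-style checking; all names below are hypothetical:
///   static const int mpi_int
///       __attribute__((type_tag_for_datatype(mpi, int))) = 42;
///   void MPI_Send(void *buf, int count, int datatype)
///       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
/// Passing a 'double *' buffer together with 'mpi_int' is then diagnosed.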
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the number of arguments being passed to
/// a function exceeds the number of parameters it expects.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
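// Worked example (added for exposition): with NumParams == 2, NumArgs == 2
// and PartialOverloading == true (the cursor sits just after a comma during
// code completion), the call is treated as carrying an extra argument, so
// TooManyArguments(2, 2, true) == (3 > 2) == true.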
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with a
/// lower or equal alignment requirement. If so, it removes the expression
/// from the set. This is used when we do not want to diagnose such a
/// misaligned access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL Kernels here and handle separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for current compilation unit this Sema
// is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
std::unique_ptr<SYCLIntegrationFooter> SyclIntFooter;
// We need to store the list of the sycl_kernel functions and their associated
// generated OpenCL Kernels so we can go back and re-name these after the
// fact.
llvm::SmallVector<std::pair<const FunctionDecl *, FunctionDecl *>>
SyclKernelsToOpenCLKernels;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during kernel emission also skips the
// useful notes that show where the kernel was called.
bool DiagnosingSYCLKernel = false;
public:
void addSyclOpenCLKernel(const FunctionDecl *SyclKernel,
FunctionDecl *OpenCLKernel) {
SyclKernelsToOpenCLKernels.emplace_back(SyclKernel, OpenCLKernel);
}
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(*this);
return *SyclIntHeader.get();
}
SYCLIntegrationFooter &getSyclIntegrationFooter() {
if (SyclIntFooter == nullptr)
SyclIntFooter = std::make_unique<SYCLIntegrationFooter>(*this);
return *SyclIntFooter.get();
}
void addSyclVarDecl(VarDecl *VD) {
if (LangOpts.SYCLIsDevice && !LangOpts.SYCLIntFooter.empty())
getSyclIntegrationFooter().addVarDecl(VD);
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction,
KernelConstStaticVariable
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void copySYCLKernelAttrs(const CXXRecordDecl *KernelObj);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void SetSYCLKernelNames();
void MarkDevices();
/// Get the number of fields or captures within the parsed type.
ExprResult ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given field number so that callers
/// can wrap it in a decltype() to get the actual type of the field.
ExprResult ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc,
QualType SourceTy, Expr *Idx);
/// Get the number of base classes within the parsed type.
ExprResult ActOnSYCLBuiltinNumBasesExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given base number so that callers
/// can wrap it in a decltype() to get the actual type of the base class.
ExprResult ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, QualType SourceTy,
Expr *Idx);
/// Emit a diagnostic about the given attribute having a deprecated name, and
/// also emit a fixit hint to generate the new attribute name.
void DiagnoseDeprecatedAttribute(const ParsedAttr &A, StringRef NewScope,
StringRef NewName);
/// Diagnoses an attribute in the 'intelfpga' namespace and suggests using
/// the attribute in the 'intel' namespace instead.
void CheckDeprecatedSYCLAttributeSpelling(const ParsedAttr &A,
StringRef NewName = "");
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know yet whether this function will be
/// codegen'ed for the device, creates a diagnostic which is emitted if and
/// when we realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(
SourceLocation Loc, unsigned DiagID,
DeviceDiagnosticReason Reason = DeviceDiagnosticReason::Sycl |
DeviceDiagnosticReason::Esimd);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred function calls that may not be
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc,
DeviceDiagnosticReason Reason);
/// Tells whether the given variable is a SYCL explicit SIMD extension
/// "private global" variable, i.e. a global variable in the private address
/// space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
return getLangOpts().SYCLIsDevice && VDecl->hasAttr<SYCLSimdAttr>() &&
VDecl->hasGlobalStorage() &&
(VDecl->getType().getAddressSpace() == LangAS::sycl_private);
}
};
inline Expr *checkMaxWorkSizeAttrExpr(Sema &S, const AttributeCommonInfo &CI,
Expr *E) {
assert(E && "Attribute must have an argument.");
if (!E->isInstantiationDependent()) {
llvm::APSInt ArgVal;
ExprResult ICE = S.VerifyIntegerConstantExpression(E, &ArgVal);
if (ICE.isInvalid())
return nullptr;
E = ICE.get();
if (ArgVal.isNegative()) {
S.Diag(E->getExprLoc(),
diag::warn_attribute_requires_non_negative_integer_argument)
<< E->getType() << S.Context.UnsignedLongLongTy
<< E->getSourceRange();
return E;
}
unsigned Val = ArgVal.getZExtValue();
if (Val == 0) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_is_zero)
<< CI << E->getSourceRange();
return nullptr;
}
}
return E;
}
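// Design note (added for clarity): a negative argument only warns and returns
// E unchanged, so the attribute is kept; a zero argument is a hard error and
// returns nullptr, which addIntelTripleArgAttr below treats as "drop the
// attribute".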
template <typename WorkGroupAttrType>
void Sema::addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr,
Expr *ZDimExpr) {
assert((XDimExpr && YDimExpr && ZDimExpr) &&
"argument has unexpected null value");
// Accept template arguments for now as they depend on something else.
// We'll get to check them when they eventually get instantiated.
if (!XDimExpr->isValueDependent() && !YDimExpr->isValueDependent() &&
!ZDimExpr->isValueDependent()) {
// Save ConstantExpr in semantic attribute
XDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, XDimExpr);
YDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, YDimExpr);
ZDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, ZDimExpr);
if (!XDimExpr || !YDimExpr || !ZDimExpr)
return;
}
D->addAttr(::new (Context)
WorkGroupAttrType(Context, CI, XDimExpr, YDimExpr, ZDimExpr));
}
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
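/// Illustrative sketch (added for exposition; 'SemaRef' is a hypothetical
/// Sema reference):
///   {
///     EnterExpressionEvaluationContext Unevaluated(
///         SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
///     // ... analyze an unevaluated operand here ...
///   } // the evaluation context is popped when the RAII object is destroyed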
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
omp_pi_cputime.c | /* vim: set ts=4 sw=4: */
/* Filename : omp_pi_cputime.c
* Description : calculate pi
* Author : SunYoung Kim <sunyzero@gmail.com>
* Notes : numerical integration method
* ALSP Chapter.10 Realtime Extensions
*/
#define _XOPEN_SOURCE 600
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <time.h>
#include <errno.h>
int num_steps=400000000; /* number of integration steps: 400 million (reduce it if this takes too long) */
struct timespec diff_ts(struct timespec t1, struct timespec t2);
clockid_t clock_cpu;
int main()
{
struct timespec ts1, ts2, ts_diff;
#ifdef _POSIX_CPUTIME
if (clock_getcpuclockid(0, &clock_cpu) == -1) {
perror("clock_getcpuclockid");
exit(EXIT_FAILURE);
}
clock_gettime(clock_cpu, &ts1);
printf("1: clock_gettime = %ld.%09ld\n", ts1.tv_sec, ts1.tv_nsec);
#endif
int i;
double x, step, sum = 0.0;
step = 1.0/(double) num_steps;
#pragma omp parallel
#pragma omp for private(x) reduction(+:sum) schedule(static) nowait
for (i=0; i<num_steps; i++) {
x = (i+0.5) * step;
sum += 4.0/(1.0 + x*x);
}
printf("pi = %.8f (sum = %.8f), 4*atan(1) = %.8f\n", step*sum, sum, atan(1)*4);
#ifdef _POSIX_CPUTIME
clock_gettime(clock_cpu, &ts2);
ts_diff = diff_ts(ts1, ts2);
printf("2: elapsed cpu time = %ld.%09ld\n", ts_diff.tv_sec, ts_diff.tv_nsec);
#endif
exit(EXIT_SUCCESS);
}
struct timespec diff_ts(struct timespec t1, struct timespec t2)
{
struct timespec t;
t.tv_sec = t2.tv_sec - t1.tv_sec;
t.tv_nsec = t2.tv_nsec - t1.tv_nsec;
if (t.tv_nsec < 0) {
t.tv_sec--;
t.tv_nsec += 1000000000;
}
return t;
}
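/* Worked example (added for clarity): with t1 = {1, 900000000} and
 * t2 = {3, 100000000}, the raw difference is tv_sec = 2, tv_nsec = -800000000;
 * the borrow branch normalizes this to {1, 200000000}, i.e. 1.2 seconds. */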
|