source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
test.c | #include <omp.h>
#include <stdio.h>
/*
 * Small OpenMP smoke test: three times in a row, fill A[0..N/3-1] in a
 * parallel loop with 3 threads, then print the filled elements.
 *
 * Fixes vs. the original:
 *  - dropped `lastprivate(A)`: it gave every thread a private, uninitialized
 *    copy of A and copied the last iteration's thread's copy back, leaving
 *    most elements indeterminate.  The iterations write disjoint elements,
 *    so a plain shared A is correct.
 *  - the print loop ran to N although only N/3 elements are ever written,
 *    reading uninitialized memory; it now prints only the initialized part.
 *  - removed unused locals (i, j, s, B).
 */
int main()
{
    int N = 30;
    int A[N];
    omp_set_num_threads(3);
    for (int j = 0; j < 3; j++)
    {
        /* A is shared; each iteration writes a distinct A[i], so no race. */
        #pragma omp parallel for
        for (int i = 0; i < N / 3; i++)
        {
            A[i] = i;
        }
        for (int k = 0; k < N / 3; k++)
        {
            printf("%d\n", A[k]);
        }
    }
    return 0;
}
|
lud_omp.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * In-place Doolittle LU decomposition (no pivoting) of the row-major
 * size x size matrix `a`.  After the call, the upper triangle (including
 * the diagonal) holds U and the strict lower triangle holds L (whose
 * implicit diagonal is 1).
 */
void lud_omp_cpu(float *a, int size) {
    for (int row = 0; row < size; row++) {
        /* U row: eliminate contributions of the first `row` columns. */
        for (int col = row; col < size; col++) {
            float acc = a[row * size + col];
            for (int t = 0; t < row; t++) {
                acc -= a[row * size + t] * a[t * size + col];
            }
            a[row * size + col] = acc;
        }
        /* L column: same elimination, then scale by the pivot U[row][row]. */
        for (int r = row + 1; r < size; r++) {
            float acc = a[r * size + row];
            for (int t = 0; t < row; t++) {
                acc -= a[r * size + t] * a[t * size + row];
            }
            a[r * size + row] = acc / a[row * size + row];
        }
    }
}
/*
 * Offloaded Doolittle LU decomposition (no pivoting), same contract as
 * lud_omp_cpu: `a` is a row-major size x size matrix decomposed in place.
 *
 * Fix vs. the original: `sum` and `k` were declared at function scope and
 * were therefore shared by all threads of each `target teams distribute
 * parallel for` region — a data race that corrupts the result whenever more
 * than one thread runs.  They are now declared inside the parallel loop
 * body, making them private to each iteration.
 */
void lud_omp_gpu(float *a, int size) {
    int i;
    /* Keep the matrix resident on the device across all i-iterations. */
    #pragma omp target data map(tofrom : a[0 : size * size])
    {
        for (i = 0; i < size; i++) {
            /* U row i: independent j-columns, parallelized on the device. */
            #pragma omp target teams distribute parallel for
            for (int j = i; j < size; j++) {
                float sum = a[i * size + j];
                for (int k = 0; k < i; k++) {
                    sum -= a[i * size + k] * a[k * size + j];
                }
                a[i * size + j] = sum;
            }
            /* L column i: independent j-rows, scaled by pivot a[i][i]. */
            #pragma omp target teams distribute parallel for
            for (int j = i + 1; j < size; j++) {
                float sum = a[j * size + i];
                for (int k = 0; k < i; k++) {
                    sum -= a[j * size + k] * a[k * size + i];
                }
                a[j * size + i] = sum / a[i * size + i];
            }
        }
    }
}
|
Cmrcmodel_p.c | #include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include "numpy/arrayobject.h"
#include <math.h>
#include <fftw3.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * On-disk MRC map-file header.  The field order and sizes define the binary
 * file layout, so this struct must not be reordered or repacked.
 * NOTE(review): total size should be the standard 1024 bytes — verify the
 * `extra`/`label` sizes against the MRC2014 spec before changing anything.
 */
typedef struct MRCHeader
{
    int nx;             //number of columns (fastest changing in map)
    int ny;             //number of rows
    int nz;             //number of sections (slowest changing in map)
    int mode;           //MODE data type :
                        // 0 image : signed 8-bit bytes range -128 to 127
                        // 1 image : 16-bit halfwords
                        // 2 image : 32-bit reals
                        // 3 transform : complex 16-bit integers
                        // 4 transform : complex 32-bit reals
                        // 5 image : unsigned 8-bit range 0 to 255
                        // 6 image : unsigned 16-bit range 0 to 65535
    int nxstart;        //number of first column in map (Default = 0)
    int nystart;        //number of first row in map
    int nzstart;        //number of first section in map
    int mx;             // number of intervals along X
    int my;             //number of intervals along Y
    int mz;             //number of intervals along Z
    float cella[3];     //cell dimensions in angstroms
    float cellb[3];     //cell angles in degrees
    int mapc;           //axis corresp to cols (1,2,3 for X,Y,Z)
    int mapr;           //axis corresp to rows (1,2,3 for X,Y,Z)
    int maps;           // axis corresp to sections (1,2,3 for X,Y,Z)
    float dmin;         //minimum density value
    float dmax;         //maximum density value
    float dmean;        //mean density value
    int ispg;           //space group number 0 or 1 (default=0)
    int nsymbt;         //number of bytes used for symmetry data (0 or 80)
    char extra[100];    //extra space used for anything - 0 by default
    float origin[3];    //origin in X,Y,Z used for transforms
    char map[4];        //character string 'MAP ' to identify file type
    int machst;         //machine stamp
    float rms;          //rms deviation of map from mean density
    int nlabels;        //number of labels being used
    char label[10][80]; //ten 80-character text labels
    //Symmetry records follow - if any - stored as text
    //as in International Tables, operators separated
    //by * and grouped into 'lines' of 80 characters
    //(ie. symmetry operators do not cross the ends of
    //the 80-character 'lines' and the 'lines' do not
    //terminate in a *).
    //Data records follow.
} MRCHeader;
/* Double-precision complex number: x = real part, y = imaginary part. */
typedef struct CMPLXd
{
    double x;
    double y;
} CMPLXd;
/* Single-precision complex number, layout-compatible with fftwf_complex
 * (two contiguous floats: real, imaginary). */
typedef struct CMPLXf
{
    float x;
    float y;
} CMPLXf;
/*
 * Compute the complex structure factor F(h,k,l) for `atomnumber` atoms.
 *
 * Each atom contributes  occ * exp(-B*r^2/2) * f(r) * exp(-2*pi*i*phase),
 * where f(r) is a four-Gaussian-plus-constant fit of the atomic scattering
 * factor (tables a1..a4, b1..b4, c below, indexed by atomid[i]) and
 * r^2 = h^2 + k^2 + l^2.
 *
 *  atomid     - per-atom index into the scattering tables; assumed sorted in
 *               runs so f(r) is recomputed only when the id changes.
 *               NOTE(review): valid range must be 0..96 (table length) —
 *               not checked here; confirm against the caller.
 *  occ        - per-atom occupancy.
 *  bf         - per-atom B-factor.
 *  cor        - per-atom coordinates, 3 floats each.  The phase pairs h with
 *               cor[i*3+2] and l with cor[i*3], i.e. coordinates appear to be
 *               stored (z,y,x) — TODO confirm with the Python caller.
 *  h, k, l    - spatial-frequency coordinates (reciprocal space).
 *
 * Accumulates in double precision (CMPLXd) and returns the result narrowed
 * to single precision (CMPLXf).
 */
static CMPLXf GetStructureFactor(int *atomid,float *occ,float *bf,float *cor,int atomnumber, float h, float k, float l)
{
    int i;
    float PI=3.14159265359;
    /* Four-Gaussian scattering-factor fit coefficients, one entry per
     * supported element/id (97 entries each). */
    float a1[]={0.04924,0.06056,0.18100,0.18017,0.18848,0.23238,0.13897,
    0.24115,0.23422,0.23449,0.54110,0.52369,0.56914,0.53363,0.50346,
    0.49157,0.48380,0.47103,0.83435,0.81886,0.82184,0.83718,0.85465,
    0.88038,0.88646,0.90628,0.90998,0.93017,0.95193,0.95958,1.09595,
    1.07488,1.04029,1.02041,1.00375,0.98899,1.42780,1.38199,1.34006,
    1.30145,1.25788,1.23849,1.24122,1.21842,1.20244,1.05766,1.21087,
    1.22503,1.35495,1.33656,1.30727,1.29090,1.30488,1.42532,2.21883,
    2.19676,2.13774,2.14007,2.19312,2.18399,2.17908,2.18332,2.17938,
    2.11909,2.19660,2.16544,2.04451,2.15671,2.15114,2.14582,2.07541,
    2.03115,1.98850,1.97392,1.92672,1.90732,1.87864,1.84670,1.82271,
    1.83029,1.93415,1.94124,1.92664,1.88230,1.86076,1.86088,2.60915,
    2.61407,2.56421,2.50061,2.55227,2.56086,2.53867,2.57218,2.57125,
    2.53626,2.54045,2.25859};
    float b1[]={1.12357,0.69147,1.37982,1.11557,0.97203,1.01435,0.52475,
    0.80966,0.70369,0.63245,1.19898,1.08412,1.10085,0.97815,0.87932,
    0.82092,0.77733,0.72503,1.26295,1.19024,1.14499,1.11535,1.08878,
    1.07250,1.03534,1.01383,0.97791,0.96164,0.94385,0.91866,0.99943,
    0.95257,0.89757,0.85673,0.82180,0.78947,1.13570,1.07975,1.03163,
    0.98560,0.93706,0.90855,0.90093,0.87173,0.85021,0.72821,0.83473,
    0.83438,0.91033,0.88439,0.85098,0.82652,0.82164,0.87668,1.28716,
    1.25255,1.20039,1.17941,1.18570,1.16032,1.13818,1.12260,1.10027,
    1.05620,1.07789,1.04740,0.97532,1.01427,0.99878,0.98379,0.93924,
    0.90829,0.87840,0.86344,0.83350,0.81707,0.79535,0.77396,0.75692,
    0.75599,0.79990,0.79971,0.78860,0.76281,0.74794,0.74375,1.06688,
    1.05942,1.02943,0.99429,1.00772,1.00272,0.98578,0.99197,0.98329,
    0.96055,0.95368,0.83727};
    float a2[]={0.13162,0.15040,0.46885,0.56677,0.64565,0.81994,0.48164,
    0.66042,0.62045,0.56495,0.90328,0.89281,1.06080,1.08891,1.09013,
    1.13064,1.16254,1.18116,2.57342,2.38629,2.29740,2.22146,2.14893,
    2.09996,1.99407,1.92631,1.85730,1.78977,1.74900,1.67373,1.72261,
    1.67658,1.61472,1.56993,1.52874,1.53397,3.42834,3.33289,3.27025,
    3.22818,3.20895,3.20473,3.20720,3.19676,3.16863,2.69010,3.16064,
    3.12183,3.24016,3.11583,2.98093,2.85075,2.76224,2.77836,4.60571,
    4.67806,4.59844,4.55426,4.52007,4.42520,4.35491,4.28398,4.22665,
    4.09793,4.11165,3.98593,3.74949,3.83978,3.76811,3.70152,3.58591,
    3.51892,3.48239,3.48896,3.44733,3.45952,3.46471,3.46733,3.47527,
    3.53807,3.85934,3.85731,3.82543,3.71207,3.63868,3.58711,5.08860,
    5.18681,5.10621,4.98823,5.16312,5.20664,5.15760,5.22851,5.21316,
    5.10320,5.07297,4.49760};
    float b2[]={4.81011,3.08412,10.0558,7.28333,5.81321,5.78949,2.68683,
    3.71987,3.09136,2.64199,6.49087,5.89505,6.73577,5.88225,5.13366,
    4.73512,4.38180,4.00402,8.16010,7.17228,6.56826,6.13917,5.78576,
    5.53692,5.18729,4.97780,4.71826,4.56669,4.46409,4.30323,5.03468,
    4.78250,4.43901,4.20859,4.01238,3.88699,8.55214,7.78631,7.15377,
    6.62142,6.13119,5.80547,5.60056,5.28704,5.01511,4.00891,4.69018,
    4.55327,4.91209,4.61908,4.30739,4.05350,3.93610,4.20012,8.64777,
    8.37051,7.83720,7.64689,7.71404,7.46517,7.27662,7.13181,6.98080,
    6.54065,6.79783,6.50638,5.79447,6.24679,6.12492,6.01377,5.61207,
    5.35170,5.12588,5.02772,4.79093,4.68197,4.53871,4.38830,4.26626,
    4.26088,4.66429,4.60863,4.47990,4.23226,4.07523,3.98911,7.01375,
    6.96304,6.62342,6.24105,6.35932,6.28157,6.06470,6.06710,5.94195,
    5.68828,5.57817,4.52510};
    float a3[]={0.22393,0.15627,1.47726,1.44076,1.33187,1.12081,1.06113,
    0.79431,0.71679,0.63292,1.60421,2.10328,2.67657,2.70558,2.58035,
    2.42939,2.26346,2.09559,2.42561,3.38497,3.22354,3.04722,2.89030,
    2.20830,2.54856,2.41077,2.28316,2.16054,1.64503,1.95071,2.61438,
    2.87946,2.98528,3.02495,2.99457,2.97758,2.98652,4.00265,4.12802,
    4.06357,3.55467,3.44083,3.64441,3.12230,2.96386,2.64994,2.66163,
    2.86597,3.44836,3.80499,4.04458,4.19425,4.38534,4.55840,4.43838,
    5.20415,5.46274,5.28185,4.71689,4.54596,4.42783,4.31155,4.23139,
    4.45265,4.09941,3.90800,4.00012,3.74202,3.65004,3.58929,3.87601,
    4.01070,4.07937,4.08018,4.03028,3.97995,3.91500,3.61207,3.51273,
    3.60589,3.92418,4.20251,4.52248,4.73640,4.97434,5.18974,5.62536,
    6.02657,6.43140,6.70978,6.10216,5.93139,5.71736,5.13516,4.98168,
    5.21947,5.06255,4.74670};
    float b3[]={14.8439,9.62423,45.2163,26.6073,20.0682,19.2115,9.74277,
    11.1230,9.44919,7.79134,40.7770,29.8227,29.0652,23.3404,18.8552,
    16.1274,14.0357,12.2894,45.7999,39.3149,34.5258,31.4668,29.2795,
    26.8071,25.4956,24.2683,22.9255,21.9676,21.0681,20.4870,24.7417,
    22.0912,19.0809,16.8080,14.8316,13.5568,41.4786,39.6810,34.5591,
    30.6316,26.3035,24.5162,24.3207,21.7096,20.3657,13.5298,19.1689,
    19.4157,23.6490,22.0613,20.0491,18.1887,17.1101,17.1970,37.6962,
    39.1838,35.2049,34.6029,36.6823,35.5218,35.0384,34.6951,34.4883,
    30.8713,34.6966,32.6372,26.7471,31.8179,31.3490,31.0735,27.8278,
    25.4086,23.4938,22.2866,20.4165,19.3994,18.3615,16.7894,15.9633,
    16.2594,19.7903,19.8331,19.4982,18.1512,17.3069,16.6754,31.4890,
    32.0739,30.0266,27.5340,28.2608,27.9933,26.8172,27.2516,26.8301,
    25.4559,25.0739,18.4511};
    float a4[]={0.11705,0.04248,1.13754,0.84079,0.60166,0.30422,0.50718,
    0.25232,0.19442,0.18389,1.66459,1.62238,1.50965,1.42748,1.24278,
    1.03713,0.87424,0.75940,3.03079,3.20375,2.84374,2.54718,2.28594,
    1.65155,1.94847,1.79055,1.67125,1.55296,1.11668,1.34175,1.52306,
    1.59500,1.53121,1.44175,1.38444,1.24952,3.72877,4.18667,3.72874,
    3.36196,2.44883,2.17559,2.55125,1.80913,1.69441,0.99422,1.42689,
    1.80448,2.15730,2.37062,2.41286,2.43357,2.22441,1.79305,4.92676,
    5.87447,5.28736,5.08329,5.23029,5.12534,4.95194,4.78637,4.60577,
    4.27187,4.22118,4.24760,4.26511,4.00651,3.90289,3.78259,3.61559,
    3.29314,2.98409,2.68047,2.54236,2.32049,2.14741,1.57926,1.45598,
    1.67871,2.05207,2.25982,2.48360,2.69494,2.66395,2.51993,4.93145,
    6.27116,5.92985,5.46739,5.29750,4.97860,4.89271,4.79656,4.61062,
    4.39750,4.27205,5.03828};
    float b4[]={39.0726,23.5306,128.547,73.1093,57.3252,54.1132,30.7532,
    30.3521,26.2880,21.7321,129.803,88.1764,90.5010,71.1450,54.9253,
    45.3868,38.5357,33.0660,166.443,121.103,108.232,100.008,94.2114,
    98.1169,83.2112,79.4409,75.6734,72.4782,79.6493,67.4006,84.9517,
    71.1665,57.6297,48.6300,41.5922,37.1324,173.152,130.817,112.588,
    101.129,96.6631,92.2924,83.6231,84.2304,80.6371,44.4972,78.9906,
    69.6184,85.1136,73.8053,62.3919,53.7125,48.8038,46.9535,189.044,
    145.486,126.133,123.673,134.769,131.059,128.845,126.872,125.712,
    109.416,124.661,118.443,96.1710,114.901,112.622,111.822,96.1033,
    86.5407,80.1163,75.7871,69.5880,66.0742,62.9838,60.4880,57.7965,
    56.3570,74.7244,69.2077,64.7147,57.2473,52.3519,48.6098,171.743,
    134.285,114.588,100.577,107.961,106.931,103.069,112.848,110.605,
    97.5280,95.3855,76.9542};
    float c[]={0.00683,0.00819,0.02026,0.02330,0.02600,0.03179,0.02179,
    0.03513,0.03534,0.03567,0.06161,0.06365,0.07005,0.06988,0.06975,
    0.07118,0.07318,0.07339,0.11302,0.11416,0.11645,0.11901,0.12164,
    0.12471,0.12617,0.12842,0.12965,0.13262,0.13474,0.13659,0.14866,
    0.14870,0.14722,0.14656,0.14669,0.14660,0.19685,0.19625,0.19642,
    0.19601,0.19452,0.19520,0.19966,0.19977,0.20136,0.18384,0.20701,
    0.21128,0.22698,0.22688,0.22472,0.22396,0.22646,0.23859,0.29953,
    0.29951,0.29740,0.29857,0.30398,0.30473,0.30577,0.30792,0.30810,
    0.30556,0.31428,0.31329,0.30399,0.31613,0.31797,0.31964,0.32463,
    0.31215,0.30946,0.31075,0.30761,0.30798,0.30603,0.30430,0.30394,
    0.30863,0.32681,0.33191,0.33331,0.32945,0.32869,0.33168,0.42517,
    0.42739,0.42435,0.41959,0.42682,0.42920,0.42879,0.43444,0.43596,
    0.43314,0.43449,0.40399};
    CMPLXd Fhkl;
    CMPLXf tmp;
    float r2=h*h+k*k+l*l;        /* squared spatial frequency */
    Fhkl.x=0.0;
    Fhkl.y=0.0;
    int curAtomID=-1;
    float AtomDiffFactor=0.0;
    float amp,phase;
    for(i=0;i<atomnumber;i++)
    {
        /* Recompute the element scattering factor only when the element
         * changes (atoms of the same element are expected to be grouped). */
        if(atomid[i]!=curAtomID)
        {
            curAtomID=atomid[i];
            AtomDiffFactor=(a1[curAtomID]*exp(-(b1[curAtomID])*r2/4.0)+a2[curAtomID]*exp(-(b2[curAtomID])*r2/4.0)+a3[curAtomID]*exp(-(b3[curAtomID])*r2/4.0)+a4[curAtomID]*exp(-(b4[curAtomID])*r2/4.0)+c[curAtomID]);
        }
        /* B-factor attenuation and occupancy weighting. */
        amp=exp(-r2*bf[i]/2.0)*occ[i]*AtomDiffFactor;
        /* Phase from the atom position; note the reversed coordinate order
         * (h pairs with cor[i*3+2]) — see header comment. */
        phase=-2.0*PI*(h*cor[i*3+2]+k*cor[i*3+1]+l*cor[i*3]);
        Fhkl.x+=cos(phase)*amp;
        Fhkl.y+=sin(phase)*amp;
    }
    /* Narrow the double-precision accumulator to the float return type. */
    tmp.x=Fhkl.x;
    tmp.y=Fhkl.y;
    return tmp;
}
/*
 * Inverse (complex-to-real) 3-D FFT of an nsam^3 half-spectrum.
 * `buf` holds nsam*nsam*(nsam/2+1) interleaved complex floats; the real
 * result is written to `out`.  FFTW's c2r transform is unnormalized, so the
 * caller is responsible for dividing by nsam^3.
 */
static void ifft3d(float* buf, int nsam, float* out)
{
    fftwf_plan inverse_plan;
    inverse_plan = fftwf_plan_dft_c2r_3d(nsam, nsam, nsam,
                                         (fftwf_complex *) buf, out,
                                         FFTW_ESTIMATE);
    fftwf_execute(inverse_plan);
    fftwf_destroy_plan(inverse_plan);
}
static float normal_random(float sigma)
{
float u = ((float) rand() / (RAND_MAX)) * 2 - 1;
float v = ((float) rand() / (RAND_MAX)) * 2 - 1;
float r = u * u + v * v;
if (r == 0 || r > 1) return normal_random(sigma);
float c = sqrt(-2 * log(r) / r);
return u * c;
}
/*
 * Python entry point: build an nsam^3 density map from an atomic model.
 *
 * Keyword arguments: atomid, occ, bf, cor (numpy arrays describing the
 * atoms), map (float32 output array), nsam (box size), psize (pixel size in
 * angstrom), res (target resolution), fsccutoff (FSC level used to set the
 * added noise amplitude).  Fills the structure-factor half-volume up to the
 * resolution shell, adds Gaussian noise calibrated against the power near
 * the resolution cutoff, inverse-FFTs, and normalizes into `map`'s data.
 *
 * Fixes vs. the original:
 *  - the success path returned Py_None without incrementing its refcount
 *    (refcount underflow); now uses Py_INCREF before returning.
 *  - the new references returned by PyArray_GETCONTIGUOUS were leaked on
 *    every call; they are now released before returning.
 *  - malloc of the FFT buffer is checked.
 * NOTE(review): if `map` is not already C-contiguous, GETCONTIGUOUS returns
 * a copy and the computed density never reaches the caller's array — that
 * pre-existing behavior is unchanged here; confirm the caller always passes
 * a contiguous float32 array.
 */
static PyObject *Cpdb2mrc(PyObject *self, PyObject *args, PyObject *kwargs)
{
    PyArrayObject *atomiddata, *occdata, *bfdata, *cordata, *map;
    int nsam;
    float psize, res, fsccutoff;
    static char *kwlist[] = {"atomid", "occ", "bf", "cor", "map", "nsam", "psize", "res", "fsccutoff", NULL};
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OOOOOifff", kwlist,
            &atomiddata, &occdata, &bfdata, &cordata, &map, &nsam, &psize, &res, &fsccutoff))
        return Py_BuildValue("Os", Py_None, "Couldn't parse variable from C function.");
    /* Ensure C-contiguous buffers; each call returns a NEW reference. */
    atomiddata = PyArray_GETCONTIGUOUS(atomiddata);
    int *atomid = (int *) PyArray_DATA(atomiddata);
    occdata = PyArray_GETCONTIGUOUS(occdata);
    float *occ = (float *) PyArray_DATA(occdata);
    bfdata = PyArray_GETCONTIGUOUS(bfdata);
    float *bf = (float *) PyArray_DATA(bfdata);
    cordata = PyArray_GETCONTIGUOUS(cordata);
    float *cor = (float *) PyArray_DATA(cordata);
    map = PyArray_GETCONTIGUOUS(map);
    float *data = (float *) PyArray_DATA(map);
    int atomnumber = PyArray_DIMS(atomiddata)[0];
    int i = 0, j = 0, k = 0, ii = 0, jj = 0, kk = 0, id = 0, id0 = 0, id1 = 0;
    int isodd = nsam % 2;
    int hnsaml = (nsam / 2 + 1);   /* half-spectrum length along fast axis */
    int nsaml = hnsaml * 2;        /* padded real-array fast-axis length */
    int hnsam = nsam / 2;
    float Fpsize = 1.0 / (psize * nsam);   /* frequency step per voxel */
    float rmax = psize * nsam / res;       /* resolution cutoff in voxels */
    if (rmax > hnsam)
    {
        rmax = hnsam;
        printf("* Warning: resolution is larger than Nyquist.(From C code.)\n");
    }
    float *pfft = malloc(sizeof(float) * nsam * nsam * nsaml);
    if (pfft == NULL)
    {
        Py_DECREF((PyObject *)atomiddata);
        Py_DECREF((PyObject *)occdata);
        Py_DECREF((PyObject *)bfdata);
        Py_DECREF((PyObject *)cordata);
        Py_DECREF((PyObject *)map);
        return PyErr_NoMemory();
    }
    CMPLXf *pfftc = (CMPLXf *)pfft;
    int step = (int)(nsam / 10.0 + 0.5), count = 0, p = 0;
    float closef2 = 0., closefcount = 0.;  /* power near the cutoff shell */
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) firstprivate(isodd,hnsaml,hnsam,ii,nsam,id0,j,jj,id1,k,kk,id,rmax,Fpsize,atomid,occ,bf,cor,atomnumber) shared(count,p) reduction(+:closef2) reduction(+:closefcount)
#endif
    for (i = -hnsam; i < hnsam + isodd; i++)
    {
        ii = (i + nsam) % nsam;   /* wrap negative frequencies */
        id0 = ii * nsam;
        if (count % step == 0) {
#ifdef _OPENMP
#pragma omp critical
#endif
            if (p == 0) {
                printf("\r* Progress: %3d%%", (int)(count * 100.0 / nsam));
                fflush(stdout);
                p = 1;
            }
        }
        for (j = -hnsam; j < hnsam + isodd; j++)
        {
            jj = (j + nsam) % nsam;
            id1 = (jj + id0) * hnsaml;
            for (k = 0; k < hnsaml; k++)
            {
                kk = k;
                id = kk + id1;
                if (sqrt(i * i + j * j + k * k) > rmax)
                {
                    /* beyond the resolution cutoff: zero the coefficient */
                    pfftc[id].x = 0.0;
                    pfftc[id].y = 0.0;
                }
                else {
                    pfftc[id] = GetStructureFactor(atomid, occ, bf, cor, atomnumber, k * Fpsize, j * Fpsize, i * Fpsize);
                    /* accumulate power in the half-voxel shell at the cutoff
                     * to calibrate the noise amplitude below */
                    if (sqrt(i * i + j * j + k * k) > rmax - .5) {
                        closef2 += pfftc[id].x * pfftc[id].x + pfftc[id].y * pfftc[id].y;
                        closefcount++;
                    }
                }
            }
        }
#ifdef _OPENMP
#pragma omp critical
#endif
        {
            count++;
            p = 0;
        }
    }
    printf("\r* Progress: 100%%\n");
    /* Noise sigma chosen so the FSC at the cutoff shell equals fsccutoff. */
    float sig = sqrt((1 - fsccutoff) * closef2 / closefcount / fsccutoff / 4);
    for (i = -hnsam; i < hnsam + isodd; i++)
    {
        ii = (i + nsam) % nsam;
        id0 = ii * nsam;
        for (j = -hnsam; j < hnsam + isodd; j++)
        {
            jj = (j + nsam) % nsam;
            id1 = (jj + id0) * hnsaml;
            for (k = 0; k < hnsaml; k++)
            {
                kk = k;
                id = kk + id1;
                pfftc[id].x += normal_random(sig);
                pfftc[id].y += normal_random(sig);
            }
        }
    }
    ifft3d(pfft, nsam, data);
    /* FFTW's c2r transform is unnormalized: divide by nsam^3. */
    float scale = nsam * nsam * nsam;
    for (i = 0; i < nsam; i++)
    {
        id = i * nsam;
        for (j = 0; j < nsam; j++)
        {
            id1 = (j + id) * nsam;
            for (k = 0; k < nsam; k++)
            {
                data[id1 + k] = data[id1 + k] / scale;
            }
        }
    }
    free(pfft);
    /* Release the contiguous views obtained above. */
    Py_DECREF((PyObject *)atomiddata);
    Py_DECREF((PyObject *)occdata);
    Py_DECREF((PyObject *)bfdata);
    Py_DECREF((PyObject *)cordata);
    Py_DECREF((PyObject *)map);
    Py_INCREF(Py_None);
    return Py_None;
}
/* Module method table.
 * NOTE(review): the docstring below looks copy-pasted from a header-reading
 * helper — Cpdb2mrc builds a density map from an atomic model; confirm and
 * reword. */
static PyMethodDef Cmrcmodel_p_methods[] = {
    {"Cpdb2mrc", (PyCFunction)Cpdb2mrc,
     METH_VARARGS | METH_KEYWORDS,
     "Read the MRC file header into a header variable.\n"},
    {NULL, NULL, 0, NULL}  /* sentinel */
};
/* Module initialization for both Python 3 (PyModule_Create) and Python 2
 * (Py_InitModule3).  import_array() must run before any numpy C-API call. */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef Cmrcmodel_pmodule = {
    PyModuleDef_HEAD_INIT,
    "Cmrcmodel_p",
    "MRC model tools.",
    -1,                      /* no per-interpreter module state */
    Cmrcmodel_p_methods,
};
PyMODINIT_FUNC PyInit_Cmrcmodel_p(void) {
    import_array();          /* initialize the numpy C-API table */
    return PyModule_Create(&Cmrcmodel_pmodule);
}
#else
PyMODINIT_FUNC initCmrcmodel_p(void) {
    Py_InitModule3("Cmrcmodel_p", Cmrcmodel_p_methods,
                   "MRC model tools.");
    import_array();
}
#endif
nr_incore.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Incore version of non-relativistic integrals JK contraction
* ic in CVHFic... is short for incore
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cvhf.h"
#include "np_helper/np_helper.h"
#include "fblas.h"
/*
* J
*/
/*
 * J-build for 8-fold symmetric integrals (i>=j, k>=l, ij>=kl).
 * `eri` points at the integrals for the fixed pair (ic,jc), stored
 * contiguously over all tril pairs (k,l) with pair index kl <= ij — hence
 * the iteration stops exactly at (ic,jc).  Accumulates both
 * vj[ic,jc] += eri * sym(dm[k,l]) and vj[k,l] += eri * sym(dm[ic,jc]).
 * Pairs with ic < jc carry no data and are skipped.
 */
void CVHFics8_ij_s2kl_o0(double *eri, double *dm, double *vj,
                         int nao, int ic, int jc)
{
    int i, j, ij;
    double dm_ij;
    double *vj_ij = &vj[ic*nao+jc];
    /* symmetrized density for the (ic,jc) pair */
    if (ic > jc) {
        dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic];
    } else if (ic == jc) {
        dm_ij = dm[ic*nao+ic];
    } else {
        return;
    }
    /* full tril rows i < ic; ij walks eri contiguously */
    for (i = 0, ij = 0; i < ic; i++) {
        for (j = 0; j < i; j++, ij++) {
            *vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]);
            vj[i*nao+j] += eri[ij] * dm_ij;
        }
        /* diagonal element j == i */
        *vj_ij += eri[ij] * dm[i*nao+i];
        vj[i*nao+i] += eri[ij] * dm_ij;
        ij++;
    }
    // i == ic
    for (j = 0; j < jc; j++, ij++) {
        *vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]);
        vj[i*nao+j] += eri[ij] * dm_ij;
    }
    /* terminal element (k,l) == (ic,jc) */
    *vj_ij += eri[ij] * dm_ij;
}
/*
 * J-build for 4-fold symmetric integrals (i>=j, k>=l): spread the
 * symmetrized density element of the pair (ic,jc) over the whole lower
 * triangle of vj, weighted by the contiguous tril eri block.
 * Pairs with ic < jc carry no data and are skipped.
 */
void CVHFics4_ij_s2kl_o0(double *eri, double *dm, double *vj,
                         int nao, int ic, int jc)
{
    double dij;
    if (ic < jc) {
        return;
    } else if (ic == jc) {
        dij = dm[ic*nao+ic];
    } else {
        dij = dm[ic*nao+jc] + dm[jc*nao+ic];
    }
    int kl = 0;
    for (int k = 0; k < nao; k++) {
        for (int l = 0; l <= k; l++, kl++) {
            vj[k*nao+l] += eri[kl] * dij;
        }
    }
}
/*
 * J-build contraction over the kl index: vj[ic,jc] accumulates the dot
 * product of the tril-packed eri block with the symmetrized density
 * (dm[k,l] + dm[l,k] off the diagonal, dm[k,k] on it).
 */
void CVHFics2kl_kl_s1ij_o0(double *eri, double *dm, double *vj,
                           int nao, int ic, int jc)
{
    double *dst = vj + ic*nao + jc;
    int kl = 0;
    for (int k = 0; k < nao; k++) {
        for (int l = 0; l < k; l++, kl++) {
            *dst += eri[kl] * (dm[k*nao+l] + dm[l*nao+k]);
        }
        /* diagonal l == k */
        *dst += eri[kl] * dm[k*nao+k];
        kl++;
    }
}
/*
* K
*/
/*
 * K-build for 8-fold symmetric integrals: for the fixed pair (ic,jc),
 * scatter eri[kl] * dm into all symmetry-equivalent (row,col) slots of vk.
 * The eri block covers tril pairs (k,l) with kl <= ij, so the loops stop at
 * (ic,jc); diagonal pairs (k==l, ic==jc) get the de-duplicated updates.
 * Pairs with ic < jc carry no data (no else branch).
 */
void CVHFics8_jk_s1il_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
    int k, l, kl;
    if (ic > jc) {
        /* full tril rows k < ic */
        for (k = 0, kl = 0; k < ic; k++) {
            for (l = 0; l < k; l++, kl++) {
                /* 8 permutations of (ic,jc|k,l) */
                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                vk[l*nao+jc] += eri[kl] * dm[k*nao+ic];
                vk[k*nao+jc] += eri[kl] * dm[l*nao+ic];
                vk[l*nao+ic] += eri[kl] * dm[k*nao+jc];
                vk[k*nao+ic] += eri[kl] * dm[l*nao+jc];
            }
            /* diagonal l == k: only 4 distinct permutations */
            vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
            vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
            vk[k*nao+jc] += eri[kl] * dm[k*nao+ic];
            vk[k*nao+ic] += eri[kl] * dm[k*nao+jc];
            kl++;
        }
        k = ic;
        for (l = 0; l < jc; l++, kl++) { // l<k
            vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
            vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
            vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
            vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
            vk[l*nao+jc] += eri[kl] * dm[k*nao+ic];
            vk[k*nao+jc] += eri[kl] * dm[l*nao+ic];
            vk[l*nao+ic] += eri[kl] * dm[k*nao+jc];
            vk[k*nao+ic] += eri[kl] * dm[l*nao+jc];
        }
        // ic = k, jc = l;
        vk[jc*nao+jc] += eri[kl] * dm[ic*nao+ic];
        vk[ic*nao+jc] += eri[kl] * dm[jc*nao+ic];
        vk[jc*nao+ic] += eri[kl] * dm[ic*nao+jc];
        vk[ic*nao+ic] += eri[kl] * dm[jc*nao+jc];
    } else if (ic == jc) {
        /* diagonal bra pair: the ic/jc permutations coincide */
        for (k = 0, kl = 0; k < ic; k++) {
            for (l = 0; l < k; l++, kl++) {
                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                vk[l*nao+ic] += eri[kl] * dm[k*nao+ic];
                vk[k*nao+ic] += eri[kl] * dm[l*nao+ic];
            }
            vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
            vk[k*nao+ic] += eri[kl] * dm[k*nao+ic];
            kl++;
        }
        k = ic;
        for (l = 0; l < k; l++, kl++) { // l<k
            vk[ic*nao+l] += eri[kl] * dm[ic*nao+ic];
            vk[ic*nao+ic] += eri[kl] * dm[ic*nao+l];
            vk[l*nao+ic] += eri[kl] * dm[ic*nao+ic];
            vk[ic*nao+ic] += eri[kl] * dm[l*nao+ic];
        }
        // ic = jc = k = l
        vk[ic*nao+ic] += eri[kl] * dm[ic*nao+ic];
    }
}
/*
 * K-build for 8-fold symmetric integrals, writing only the lower triangle
 * of the (Hermitian) vk.  Unlike the _s1il variant, this walks eri by
 * advancing the pointer row by row (`eri += k+1` after each tril row) and
 * splits the k-range into k<jc, k==jc, jc<k<ic, k==ic so that every update
 * lands at a tril (row,col) index.  The ic==jc branch is additionally
 * unrolled two k-rows at a time.
 * Too order-sensitive to restructure safely — code left untouched.
 */
void CVHFics8_jk_s2il_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
    int k, l;
    if (ic > jc) {
        // k < jc
        for (k=0; k < jc; k++) {
            for (l = 0; l < k; l++) {
                vk[jc*nao+l] += eri[l] * dm[ic*nao+k];
                vk[jc*nao+k] += eri[l] * dm[ic*nao+l];
                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
            }
            // l = k
            vk[jc*nao+k] += eri[k] * dm[ic*nao+k];
            vk[ic*nao+k] += eri[k] * dm[jc*nao+k];
            eri += k + 1;   /* advance past tril row k */
        }
        // k = jc
        for (l = 0; l < k; l++) {
            vk[jc*nao+l] += eri[l] * dm[ic*nao+jc];
            vk[jc*nao+jc] += eri[l] *(dm[ic*nao+l] + dm[l*nao+ic]);
            vk[ic*nao+l] += eri[l] * dm[jc*nao+jc];
            vk[ic*nao+jc] += eri[l] * dm[jc*nao+l];
        }
        // l = k = jc
        vk[jc*nao+jc] += eri[l] *(dm[ic*nao+jc] + dm[jc*nao+ic]);
        vk[ic*nao+jc] += eri[l] * dm[jc*nao+jc];
        eri += k + 1;
        // k > jc
        for (k=jc+1; k < ic; k++) {
            // l < jc
            for (l = 0; l < jc; l++) {
                vk[jc*nao+l] += eri[l] * dm[ic*nao+k];
                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
                vk[k*nao+jc] += eri[l] * dm[l*nao+ic];
            }
            // l = jc
            vk[jc*nao+jc] += eri[l] *(dm[ic*nao+k] + dm[k*nao+ic]);
            vk[ic*nao+jc] += eri[l] * dm[jc*nao+k];
            vk[ic*nao+k] += eri[l] * dm[jc*nao+jc];
            vk[k*nao+jc] += eri[l] * dm[jc*nao+ic];
            //eri += jc+1;
            // l > jc
            for (l = jc+1; l < k; l++) {
                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
                vk[l*nao+jc] += eri[l] * dm[k*nao+ic];
                vk[k*nao+jc] += eri[l] * dm[l*nao+ic];
            }
            // l = k
            vk[jc*nao+k] += eri[l] * dm[ic*nao+k];
            vk[ic*nao+k] += eri[l] * dm[jc*nao+k];
            vk[k*nao+jc] += eri[l] * dm[k*nao+ic];
            eri += k + 1;
        }
        // k = ic
        for (l = 0; l < jc; l++) {
            vk[jc*nao+l] += eri[l] * dm[ic*nao+ic];
            vk[ic*nao+l] += eri[l] * dm[jc*nao+ic];
            vk[ic*nao+ic] += eri[l] *(dm[jc*nao+l] + dm[l*nao+jc]);
            vk[ic*nao+jc] += eri[l] * dm[l*nao+ic];
        }
        // ic = k, jc = l;
        vk[jc*nao+jc] += eri[l] * dm[ic*nao+ic];
        vk[ic*nao+jc] += eri[l] * dm[jc*nao+ic];
        vk[ic*nao+ic] += eri[l] * dm[jc*nao+jc];
        eri += jc + 1;
    } else if (ic == jc) {
        /* diagonal bra pair; two k-rows processed per iteration */
        for (k = 0; k < ic-1; k+=2) {
            for (l = 0; l < k; l++) {
                vk[ic*nao+l] += eri[l] * dm[ic*nao+k];
                vk[ic*nao+k] += eri[l] * dm[ic*nao+l];
                vk[ic*nao+l ] += eri[l+k+1] * dm[ic*nao+k+1];
                vk[ic*nao+k+1] += eri[l+k+1] * dm[ic*nao+l ];
            }
            vk[ic*nao+k] += eri[k] * dm[ic*nao+k];
            eri += k+1;
            vk[ic*nao+k ] += eri[k] * dm[ic*nao+k+1];
            vk[ic*nao+k+1] += eri[k] * dm[ic*nao+k ];
            vk[ic*nao+k+1] += eri[k+1] * dm[ic*nao+k+1];
            eri += k+2;
        }
        /* remainder row when ic is odd-positioned */
        for (; k < ic; k++) {
            for (l = 0; l < k; l++) {
                vk[ic*nao+l] += eri[l] * dm[ic*nao+k];
                vk[ic*nao+k] += eri[l] * dm[ic*nao+l];
            }
            vk[ic*nao+k] += eri[k] * dm[ic*nao+k];
            eri += k+1;
        }
        for (l = 0; l < k; l++) { // l<k
            vk[ic*nao+l] += eri[l] * dm[ic*nao+ic];
            vk[ic*nao+ic] += eri[l] *(dm[ic*nao+l] + dm[l*nao+ic]);
        }
        // ic = jc = k = l
        vk[ic*nao+ic] += eri[l] * dm[ic*nao+ic];
        eri += k + 1;
    }
}
/*
 * K-build for 4-fold symmetric integrals (i>=j, k>=l): for the fixed pair
 * (ic,jc), scatter the tril eri block times dm into rows ic and jc of vk.
 * Off-diagonal (k,l) pairs contribute with both orientations; diagonal
 * pairs once.  Pairs with ic < jc carry no data.
 */
void CVHFics4_jk_s1il_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
    if (ic < jc) {
        return;
    }
    int kl = 0;
    if (ic > jc) {
        for (int k = 0; k < nao; k++) {
            for (int l = 0; l < k; l++, kl++) {
                const double e = eri[kl];
                vk[jc*nao+l] += e * dm[ic*nao+k];
                vk[jc*nao+k] += e * dm[ic*nao+l];
                vk[ic*nao+l] += e * dm[jc*nao+k];
                vk[ic*nao+k] += e * dm[jc*nao+l];
            }
            /* diagonal l == k */
            vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
            vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
            kl++;
        }
    } else {
        /* ic == jc: single row */
        for (int k = 0; k < nao; k++) {
            for (int l = 0; l < k; l++, kl++) {
                const double e = eri[kl];
                vk[ic*nao+l] += e * dm[ic*nao+k];
                vk[ic*nao+k] += e * dm[ic*nao+l];
            }
            vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
            kl++;
        }
    }
}
/* il->jk contraction: identical to the jk->il one by the k<->l symmetry of
 * the 4-fold packed integrals, so simply delegate. */
void CVHFics4_il_s1jk_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
    CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}
/*
 * K-build for 4-fold symmetric integrals writing only the lower triangle of
 * vk.  The k-range is split at jc and ic so every update targets a tril
 * element; for k > ic the tril offset kl = k*(k+1)/2 is recomputed because
 * only part of each eri row is consumed.
 * Too order-sensitive to restructure safely — code left untouched.
 */
void CVHFics4_jk_s2il_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
    int k, l, kl;
    if (ic > jc) {
        /* k <= jc: both row jc and row ic updates stay in the tril */
        for (k = 0, kl = 0; k <= jc; k++) {
            for (l = 0; l < k; l++, kl++) {
                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
            }
            vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
            vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
            kl++;
        }
        /* jc < k <= ic: row jc updates only for l <= jc */
        for (k = jc+1; k <= ic; k++) {
            for (l = 0; l <= jc; l++, kl++) {
                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
            }
            for (l = jc+1; l < k; l++, kl++) {
                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
            }
            vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
            kl++;
        }
        /* k > ic: only columns l <= ic are tril; skip the rest of the row */
        for (k = ic+1; k < nao; k++) {
            for (l = 0, kl = k*(k+1)/2; l <= jc; l++, kl++) {
                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
            }
            for (l = jc+1; l <= ic; l++, kl++) {
                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
            }
        }
    } else if (ic == jc) {
        for (k = 0, kl = 0; k <= ic; k++) {
            for (l = 0; l < k; l++, kl++) {
                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
            }
            vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
            kl++;
        }
        for (k = ic+1; k < nao; k++) {
            for (l = 0, kl = k*(k+1)/2; l <= ic; l++, kl++) {
                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
            }
        }
    }
}
/* il->jk (tril output) contraction: same as jk->il by the k<->l symmetry of
 * the 4-fold packed integrals, so simply delegate. */
void CVHFics4_il_s2jk_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
    CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}
/*
* einsum ijkl,ij->(s2)kl
* 8-fold symmetry for eri: i>=j,k>=l,ij>=kl
* input address eri of the first element for pair ij=ic*(ic+1)/2+jc
* i.e. ~ &eri_ao[ij*(ij+1)/2]
* dm can be non-Hermitian,
* output vk might not be Hermitian
*
* NOTE all _s2kl (nrs8_, nrs4_, nrs2kl_) assumes the tril part of eri
* being stored in C-order *contiguously*. so call CVHFunpack_nrblock2tril
* to generate eris
*/
/* Public s8 J-build entry: thin alias of the _o0 reference implementation. */
void CVHFics8_ij_s2kl(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
    CVHFics8_ij_s2kl_o0(eri, dm, vj, nao, ic, jc);
}
// tri_dm: fold upper triangular dm to lower triangle,
// tri_dm[i*(i+1)/2+j] = dm[i*nao+j] + dm[j*nao+i] for i > j
/*
 * BLAS-accelerated s8 J-build taking a pre-symmetrized, tril-packed density
 * (tri_dm[i*(i+1)/2+j] = dm[i,j] + dm[j,i] for i > j, dm[i,i] on the
 * diagonal).  Full tril rows i < ic are handled with ddot_ (vj[ic,jc]
 * contraction) and daxpy_ (scatter of dm_ijc into row i of vj); the partial
 * last row i == ic is finished element-wise, ending at (ic,jc).
 */
void CVHFics8_tridm_vj(double *eri, double *tri_dm, double *vj,
                       int nao, int ic, int jc)
{
    int i, j, ij;
    double dm_ijc = tri_dm[ic*(ic+1)/2+jc];
    double *vj_ij = &vj[ic*nao+jc];
    const int INC1 = 1;
    int i1;                      /* length of tril row i */
    for (i = 0, ij = 0; i < ic; i++) {
        i1 = i + 1;
        *vj_ij += ddot_(&i1, eri+ij, &INC1, tri_dm+ij, &INC1);
        daxpy_(&i1, &dm_ijc, eri+ij, &INC1, vj+i*nao, &INC1);
        ij += i1;
    }
    // i == ic
    for (j = 0; j < jc; j++, ij++) {
        *vj_ij += eri[ij] * tri_dm[ij];
        vj[i*nao+j] += eri[ij] * dm_ijc;
    }
    /* terminal element (i,j) == (ic,jc) */
    *vj_ij += eri[ij] * dm_ijc;
}
/* Public s8 K-build (full vk) entry: alias of the _o0 implementation. */
void CVHFics8_jk_s1il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
    CVHFics8_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}
/*
* einsum ijkl,jk->(s2)il
* output vk should be Hermitian
*/
/* Public s8 K-build (tril vk) entry: alias of the _o0 implementation. */
void CVHFics8_jk_s2il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
    CVHFics8_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}
/*
* einsum ijkl,jk->il
* 4-fold symmetry for eri: i>=j,k>=l
*/
/* Public s4 K-build (full vk) entry: alias of the _o0 implementation. */
void CVHFics4_jk_s1il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
    CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}
/* Public s4 il->jk entry; equals jk->il by k<->l packing symmetry. */
void CVHFics4_il_s1jk(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
    CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}
/*
* output vk should be Hermitian
*/
/* Public s4 K-build (tril vk) entry: alias of the _o0 implementation. */
void CVHFics4_jk_s2il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
    CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}
/* Public s4 il->jk (tril vk) entry; equals jk->il by packing symmetry. */
void CVHFics4_il_s2jk(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
    CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}
/* Public s4 J-build (ij contraction) entry: alias of the _o0 version. */
void CVHFics4_ij_s2kl(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
    CVHFics4_ij_s2kl_o0(eri, dm, vj, nao, ic, jc);
}
/* Public s4 J-build (kl contraction) entry: only tril pairs (ic >= jc)
 * carry data; others are silently skipped. */
void CVHFics4_kl_s2ij(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
    if (ic >= jc) {
        CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc);
    }
}
/*
 * J-build with no permutation symmetry: spread the single density element
 * dm[ic,jc] over the full nao*nao eri block into vj.
 */
void CVHFics1_ij_s1kl(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
    const double factor = dm[ic*nao+jc];
    const int nn = nao * nao;
    for (int kl = 0; kl < nn; kl++) {
        vj[kl] += eri[kl] * factor;
    }
}
/* J-build kl contraction with no symmetry: vj[ic,jc] += <eri, dm> over the
 * full nao*nao block, via BLAS ddot_. */
void CVHFics1_kl_s1ij(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
    const int INC1 = 1;
    int nn = nao * nao;
    vj[ic*nao+jc] += ddot_(&nn, eri, &INC1, dm, &INC1);
}
/*
 * K-build with no symmetry: vk[ic,l] += sum_k eri[k,l] * dm[jc,k] over the
 * full nao*nao eri block.
 */
void CVHFics1_jk_s1il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
    double *vk_row = vk + ic*nao;
    const double *dm_row = dm + jc*nao;
    for (int k = 0; k < nao; k++) {
        const double d = dm_row[k];
        const double *eri_row = eri + (size_t)k * nao;
        for (int l = 0; l < nao; l++) {
            vk_row[l] += eri_row[l] * d;
        }
    }
}
/*
 * K-build with no symmetry: vk[jc,k] += sum_l eri[k,l] * dm[ic,l] over the
 * full nao*nao eri block.
 */
void CVHFics1_il_s1jk(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
    double *vk_row = vk + jc*nao;
    const double *dm_row = dm + ic*nao;
    for (int k = 0; k < nao; k++) {
        const double *eri_row = eri + (size_t)k * nao;
        for (int l = 0; l < nao; l++) {
            vk_row[k] += eri_row[l] * dm_row[l];
        }
    }
}
/*
 * J-build for 2-fold (i>=j) symmetric integrals: spread the symmetrized
 * density element of the pair (ic,jc) over the full nao*nao eri block.
 * Pairs with ic < jc carry no data and are skipped.
 */
void CVHFics2ij_ij_s1kl(double *eri, double *dm, double *vj,
                        int nao, int ic, int jc)
{
    if (ic < jc) {
        return;
    }
    const double dij = (ic == jc) ? dm[ic*nao+ic]
                                  : dm[ic*nao+jc] + dm[jc*nao+ic];
    const int nn = nao * nao;
    for (int kl = 0; kl < nn; kl++) {
        vj[kl] += eri[kl] * dij;
    }
}
/* 2-fold (i>=j) J-build kl contraction: only tril pairs carry data; the
 * per-pair eri block has no kl symmetry, so delegate to the s1 routine. */
void CVHFics2ij_kl_s2ij(double *eri, double *dm, double *vj,
                        int nao, int ic, int jc)
{
    if (ic < jc) {
        return;
    }
    CVHFics1_kl_s1ij(eri, dm, vj, nao, ic, jc);
}
/*
 * K-build for 2-fold (i>=j) symmetric integrals over a full nao*nao eri
 * block: off-diagonal pairs update both row jc and row ic of vk; diagonal
 * pairs update row ic once.  Pairs with ic < jc carry no data.
 */
void CVHFics2ij_jk_s1il(double *eri, double *dm, double *vk,
                        int nao, int ic, int jc)
{
    if (ic < jc) {
        return;
    }
    for (int k = 0; k < nao; k++) {
        const double d_i = dm[ic*nao+k];
        const double d_j = dm[jc*nao+k];
        const double *erow = eri + (size_t)k * nao;
        if (ic > jc) {
            for (int l = 0; l < nao; l++) {
                vk[jc*nao+l] += erow[l] * d_i;
                vk[ic*nao+l] += erow[l] * d_j;
            }
        } else {
            for (int l = 0; l < nao; l++) {
                vk[ic*nao+l] += erow[l] * d_i;
            }
        }
    }
}
/*
 * K-build (il->jk) for 2-fold (i>=j) symmetric integrals over a full
 * nao*nao eri block: off-diagonal pairs accumulate into columns k of rows
 * jc and ic of vk; diagonal pairs into row ic only.  Pairs with ic < jc
 * carry no data.
 */
void CVHFics2ij_il_s1jk(double *eri, double *dm, double *vk,
                        int nao, int ic, int jc)
{
    if (ic < jc) {
        return;
    }
    if (ic > jc) {
        for (int k = 0; k < nao; k++) {
            const double *erow = eri + (size_t)k * nao;
            for (int l = 0; l < nao; l++) {
                vk[jc*nao+k] += erow[l] * dm[ic*nao+l];
                vk[ic*nao+k] += erow[l] * dm[jc*nao+l];
            }
        }
    } else {
        for (int k = 0; k < nao; k++) {
            const double *erow = eri + (size_t)k * nao;
            for (int l = 0; l < nao; l++) {
                vk[ic*nao+k] += erow[l] * dm[ic*nao+l];
            }
        }
    }
}
/*
 * J-build for 2-fold (k>=l) symmetric integrals: spread the single density
 * element dm[ic,jc] over the tril-packed eri block into the lower triangle
 * of vj.
 */
void CVHFics2kl_ij_s2kl(double *eri, double *dm, double *vj,
                        int nao, int ic, int jc)
{
    const double dij = dm[ic*nao+jc];
    int kl = 0;
    for (int k = 0; k < nao; k++) {
        for (int l = 0; l <= k; l++, kl++) {
            vj[k*nao+l] += eri[kl] * dij;
        }
    }
}
/* Public 2-fold (k>=l) J-build kl contraction: alias of the _o0 version. */
void CVHFics2kl_kl_s1ij(double *eri, double *dm, double *vj,
                        int nao, int ic, int jc)
{
    CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc);
}
/*
 * K-build for 2-fold (k>=l) symmetric integrals: scatter the tril-packed
 * eri block times row jc of dm into row ic of vk; off-diagonal (k,l) pairs
 * contribute with both orientations.
 */
void CVHFics2kl_jk_s1il(double *eri, double *dm, double *vk,
                        int nao, int ic, int jc)
{
    double *row = vk + ic*nao;
    const double *d = dm + jc*nao;
    int kl = 0;
    for (int k = 0; k < nao; k++) {
        for (int l = 0; l < k; l++, kl++) {
            row[l] += eri[kl] * d[k];
            row[k] += eri[kl] * d[l];
        }
        /* diagonal l == k */
        row[k] += eri[kl] * d[k];
        kl++;
    }
}
/*
 * K-build (il->jk) for 2-fold (k>=l) symmetric integrals: scatter the
 * tril-packed eri block times row ic of dm into row jc of vk; off-diagonal
 * (k,l) pairs contribute with both orientations.
 */
void CVHFics2kl_il_s1jk(double *eri, double *dm, double *vk,
                        int nao, int ic, int jc)
{
    double *row = vk + jc*nao;
    const double *d = dm + ic*nao;
    int kl = 0;
    for (int k = 0; k < nao; k++) {
        for (int l = 0; l < k; l++, kl++) {
            row[l] += eri[kl] * d[k];
            row[k] += eri[kl] * d[l];
        }
        /* diagonal l == k */
        row[k] += eri[kl] * d[k];
        kl++;
    }
}
/**************************************************
* s8 8-fold symmetry: i>=j,k>=l,ij>=kl
* s4 4-fold symmetry: i>=j,k>=l
* s2ij 2-fold symmetry: i>=j
* s2kl 2-fold symmetry: k>=l
* s1 no permutation symmetry
**************************************************/
/*
 * Incore JK driver for 8-fold symmetric integrals (i>=j, k>=l, ij>=kl).
 * Zeroes vj/vk, then each thread accumulates into private buffers over a
 * dynamic share of the tril pair index ij, recovering (i,j) via the inverse
 * triangular formula and the eri offset ij*(ij+1)/2 (tril of tril).
 * Private buffers are merged under a critical section.
 *
 * Fix vs. the original: the calloc results were dereferenced unchecked.
 * They are now tested; on failure we abort() rather than skip the
 * worksharing loop, because a thread bypassing `omp for` would itself be
 * undefined behavior.
 * NOTE(review): npair is used inside `default(none)` relying on const
 * variables being predetermined shared (OpenMP 3.x rule) — unchanged here.
 */
void CVHFnrs8_incore_drv(double *eri, double *dmj, double *vj,
                         double *dmk, double *vk,
                         int n, void (*const fvj)(), void (*const fvk)())
{
    const int npair = n*(n+1)/2;
    memset(vj, 0, sizeof(double)*n*n);
    memset(vk, 0, sizeof(double)*n*n);
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n)
{
    int i, j;
    size_t ij, off;
    /* +2 padding kept from the original; purpose unclear — TODO confirm */
    double *vj_priv = calloc(n*n+2, sizeof(double));
    double *vk_priv = calloc(n*n+2, sizeof(double));
    if (vj_priv == NULL || vk_priv == NULL) {
        abort();   /* cannot safely skip the worksharing loop below */
    }
#pragma omp for nowait schedule(dynamic, 4)
    for (ij = 0; ij < npair; ij++) {
        /* invert ij = i*(i+1)/2 + j; 1e-7 guards sqrt rounding */
        i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
        j = ij - i*(i+1)/2;
        off = ij*(ij+1)/2;     /* s8: eri block for pair ij starts here */
        (*fvj)(eri+off, dmj, vj_priv, n, i, j);
        (*fvk)(eri+off, dmk, vk_priv, n, i, j);
    }
#pragma omp critical
    {
        for (i = 0; i < n*n; i++) {
            vj[i] += vj_priv[i];
            vk[i] += vk_priv[i];
        }
    }
    free(vj_priv);
    free(vk_priv);
}
}
/*
 * Incore JK driver for 4-fold symmetric integrals (i>=j, k>=l).  Same
 * structure as the s8 driver, but each tril pair ij owns a full tril kl
 * block, so the eri offset is ij * npair.
 *
 * Fix vs. the original: calloc results checked; abort() on failure so no
 * thread skips the worksharing loop (which would be UB).
 */
void CVHFnrs4_incore_drv(double *eri, double *dmj, double *vj,
                         double *dmk, double *vk,
                         int n, void (*const fvj)(), void (*const fvk)())
{
    const int npair = n*(n+1)/2;
    memset(vj, 0, sizeof(double)*n*n);
    memset(vk, 0, sizeof(double)*n*n);
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n)
{
    int i, j;
    size_t ij, off;
    double *vj_priv = calloc(n*n+2, sizeof(double));
    double *vk_priv = calloc(n*n+2, sizeof(double));
    if (vj_priv == NULL || vk_priv == NULL) {
        abort();
    }
#pragma omp for nowait schedule(dynamic, 4)
    for (ij = 0; ij < npair; ij++) {
        i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
        j = ij - i*(i+1)/2;
        off = ij * npair;      /* s4: one tril kl block per tril ij pair */
        (*fvj)(eri+off, dmj, vj_priv, n, i, j);
        (*fvk)(eri+off, dmk, vk_priv, n, i, j);
    }
#pragma omp critical
    {
        for (i = 0; i < n*n; i++) {
            vj[i] += vj_priv[i];
            vk[i] += vk_priv[i];
        }
    }
    free(vj_priv);
    free(vk_priv);
}
}
void CVHFnrs2ij_incore_drv(double *eri, double *dmj, double *vj,
                           double *dmk, double *vk,
                           int n, void (*const fvj)(), void (*const fvk)())
{
        /* Build vj/vk from ERIs with 2-fold bra symmetry only (i>=j):
         * eri has shape (npair, n*n); block ij starts at ij*n*n and holds the
         * full square of kl elements.
         * See CVHFnrs8_incore_drv for the common parameter description. */
        const int npair = n*(n+1)/2;
        memset(vj, 0, sizeof(double)*n*n);
        memset(vk, 0, sizeof(double)*n*n);
        /* npair must be listed explicitly: with default(none), C (unlike
         * C++) does not predetermine const-qualified variables as shared */
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n, npair)
{
        int i, j;
        size_t ij, off;
        /* per-thread accumulators; NOTE(review): calloc result unchecked */
        double *vj_priv = calloc(n*n+2, sizeof(double));
        double *vk_priv = calloc(n*n+2, sizeof(double));
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < npair; ij++) {
                /* recover (i, j) from the packed lower-triangular index ij */
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - i*(i+1)/2;
                off = ij * n * n;
                (*fvj)(eri+off, dmj, vj_priv, n, i, j);
                (*fvk)(eri+off, dmk, vk_priv, n, i, j);
        }
#pragma omp critical
        {
                for (i = 0; i < n*n; i++) {
                        vj[i] += vj_priv[i];
                        vk[i] += vk_priv[i];
                }
        }
        free(vj_priv);
        free(vk_priv);
}
}
void CVHFnrs2kl_incore_drv(double *eri, double *dmj, double *vj,
                           double *dmk, double *vk,
                           int n, void (*const fvj)(), void (*const fvk)())
{
        /* Build vj/vk from ERIs with 2-fold ket symmetry only (k>=l):
         * eri has shape (n*n, npair); the loop runs over all n*n square (i,j)
         * indices, and block ij starts at ij*npair holding the packed kl.
         * See CVHFnrs8_incore_drv for the common parameter description. */
        const int npair = n*(n+1)/2;
        memset(vj, 0, sizeof(double)*n*n);
        memset(vk, 0, sizeof(double)*n*n);
        /* npair must be listed explicitly: with default(none), C (unlike
         * C++) does not predetermine const-qualified variables as shared */
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n, npair)
{
        int i, j;
        size_t ij, off;
        /* per-thread accumulators; NOTE(review): calloc result unchecked */
        double *vj_priv = calloc(n*n+2, sizeof(double));
        double *vk_priv = calloc(n*n+2, sizeof(double));
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < n*n; ij++) {
                /* unpack the square index: ij = i*n + j */
                i = ij / n;
                j = ij - i * n;
                off = ij * npair;
                (*fvj)(eri+off, dmj, vj_priv, n, i, j);
                (*fvk)(eri+off, dmk, vk_priv, n, i, j);
        }
#pragma omp critical
        {
                for (i = 0; i < n*n; i++) {
                        vj[i] += vj_priv[i];
                        vk[i] += vk_priv[i];
                }
        }
        free(vj_priv);
        free(vk_priv);
}
}
void CVHFnrs1_incore_drv(double *eri, double *dmj, double *vj,
                         double *dmk, double *vk,
                         int n, void (*const fvj)(), void (*const fvk)())
{
        /* Build the Coulomb (vj) and exchange (vk) matrices from an ERI
         * array with no permutation symmetry: eri has shape (n*n, n*n) and
         * block ij starts at ij*n*n.  fvj/fvk are the per-(i,j) contraction
         * kernels.  Threads accumulate into private scratch buffers which
         * are reduced into vj/vk under a critical section. */
        memset(vj, 0, sizeof(double)*n*n);
        memset(vk, 0, sizeof(double)*n*n);
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n)
{
        int i, j;
        size_t ij, off;
        /* thread-private accumulation buffers (zero-initialized) */
        double *jbuf = calloc(n*n+2, sizeof(double));
        double *kbuf = calloc(n*n+2, sizeof(double));
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < n*n; ij++) {
                i = ij / n;
                j = ij % n;
                off = ij * n * n;
                (*fvj)(eri+off, dmj, jbuf, n, i, j);
                (*fvk)(eri+off, dmk, kbuf, n, i, j);
        }
        /* merge this thread's partial results into the shared outputs */
#pragma omp critical
        for (i = 0; i < n*n; i++) {
                vj[i] += jbuf[i];
                vk[i] += kbuf[i];
        }
        free(jbuf);
        free(kbuf);
}
}
|
thread_info.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_UTIL_THREAD_INFO_H_
#define CORE_UTIL_THREAD_INFO_H_
#include <omp.h>
#include <sched.h>
#include <atomic>
#include <vector>
#include "core/util/log.h"
#include "core/util/numa.h"
namespace bdm {
/// \brief This class stores information about each thread. (e.g. to which NUMA
/// node it belongs to.)
/// NB: Threads **must** be bound to CPUs using `OMP_PROC_BIND=true`.
class ThreadInfo {
 public:
  /// Returns the process-wide singleton instance.
  static ThreadInfo* GetInstance() {
    static ThreadInfo kInstance;
    return &kInstance;
  }

  // FIXME add test
  /// Returns the OpenMP thread id of the calling thread.
  int GetMyThreadId() const { return omp_get_thread_num(); }

  // FIXME add test
  /// Returns the NUMA node the calling thread is bound to.
  int GetMyNumaNode() const { return GetNumaNode(GetMyThreadId()); }

  /// Return the numa thread id of an openmp thread.
  int GetMyNumaThreadId() const { return GetNumaThreadId(GetMyThreadId()); }

  /// Returns the number of NUMA nodes on this machine
  int GetNumaNodes() const { return numa_nodes_; }

  /// Returns the numa node the given openmp thread is bound to.
  /// NOTE(review): no bounds check — omp_thread_id must be < GetMaxThreads().
  int GetNumaNode(int omp_thread_id) const {
    return thread_numa_mapping_[omp_thread_id];
  }

  /// Returns the number of threads in a given NUMA node.
  int GetThreadsInNumaNode(int numa_node) const {
    return threads_in_numa_[numa_node];
  }

  /// Return the numa thread id of an openmp thread.
  int GetNumaThreadId(int omp_thread_id) const {
    return numa_thread_id_[omp_thread_id];
  }

  /// Return the maximum number of threads.
  int GetMaxThreads() const { return max_threads_; }

  /// Returns a unique thread id even for parallel regions that
  /// don't use OpenMP.
  uint64_t GetUniversalThreadId() const {
    // assigned once per thread on first call (thread_local initializer)
    thread_local uint64_t kTid = thread_counter_++;
    return kTid;
  }

  /// Upper bound (exclusive) on ids handed out by GetUniversalThreadId.
  uint64_t GetMaxUniversalThreadId() const { return thread_counter_; }

  /// Renews the metadata.\n
  /// Whenever a thread is scheduled on a different cpu, e.g. using
  /// `numa_run_on_node`, `Renew()` must be called to update the thread
  /// metadata.
  void Renew() {
    max_threads_ = omp_get_max_threads();
    numa_nodes_ = numa_num_configured_nodes();
    thread_numa_mapping_.clear();
    numa_thread_id_.clear();
    threads_in_numa_.clear();
    thread_numa_mapping_.resize(max_threads_, 0);
    numa_thread_id_.resize(max_threads_, 0);
    threads_in_numa_.resize(numa_nodes_, 0);

    // (openmp thread id -> numa node)
    // each thread queries the numa node of the cpu it is currently pinned to
#pragma omp parallel
    {
      int tid = omp_get_thread_num();
      thread_numa_mapping_[tid] = numa_node_of_cpu(sched_getcpu());
    }

    // (numa -> number of associated threads), and
    // (omp_thread_id -> thread id in numa)
    for (uint16_t n = 0; n < numa_nodes_; n++) {
      uint64_t cnt = 0;
      for (uint64_t t = 0; t < max_threads_; t++) {
        int numa = thread_numa_mapping_[t];
        if (n == numa) {
          numa_thread_id_[t] = cnt;
          cnt++;
        }
      }
      threads_in_numa_[n] = cnt;
    }
  }

  friend std::ostream& operator<<(std::ostream& str, const ThreadInfo& ti) {
    str << "max_threads\t\t: " << ti.max_threads_
        << "\nnum_numa nodes\t\t: " << ti.numa_nodes_;

    str << "\nthread to numa mapping\t: ";
    for (auto& el : ti.thread_numa_mapping_) {
      str << el << " ";
    }

    str << "\nthread id in numa node\t: ";
    for (auto& el : ti.numa_thread_id_) {
      str << el << " ";
    }

    str << "\nnum threads per numa\t: ";
    for (auto& el : ti.threads_in_numa_) {
      str << el << " ";
    }
    str << "\n";
    return str;
  }

 private:
  /// Counter used to hand out universal thread ids (see
  /// GetUniversalThreadId); defined in the corresponding .cc file.
  static std::atomic<uint64_t> thread_counter_;

  /// Maximum number of threads for this simulation.
  uint64_t max_threads_;

  /// Number of NUMA nodes on this machine.
  uint16_t numa_nodes_;

  /// Contains the mapping thread id -> numa node \n
  /// vector position = omp_thread_id \n
  /// vector value = numa node
  std::vector<int> thread_numa_mapping_;

  /// Contains the mapping omp_thread_id -> numa thread id \n
  /// each thread in a numa domain has a unique id in the range 0 to number \n
  /// of threads in this numa domain
  std::vector<int> numa_thread_id_;

  /// Contains the mapping numa node -> total number of threads in this numa
  /// node \n
  /// vector position: numa node \n
  /// vector value number of threads
  std::vector<int> threads_in_numa_;

  ThreadInfo() {
    auto proc_bind = omp_get_proc_bind();
    // Accept omp_proc_bind_spread as well as omp_proc_bind_true:
    // due to some reason some OpenMP implementations set proc bind to spread
    // even though OMP_PROC_BIND is set to true.
    // A performance analysis showed almost identical results between true,
    // and spread.
    if (proc_bind != omp_proc_bind_true && proc_bind != omp_proc_bind_spread) {
      Log::Warning(
          "ThreadInfo::ThreadInfo",
          "The environment variable OMP_PROC_BIND must be set to "
          "true prior to running BioDynaMo ('export OMP_PROC_BIND=true')");
    }
    Renew();
  }
};
} // namespace bdm
#endif // CORE_UTIL_THREAD_INFO_H_
|
Example_nesting_restrict.5.c | /*
* @@name: nesting_restrict.5c
* @@type: C
* @@compilable: no
* @@linkable: no
* @@expect: failure
*/
/* Stand-in for real computation performed inside the parallel region. */
void work(int i, int j) {}
/* DELIBERATELY NON-CONFORMING example (note `@@expect: failure` above):
 * a barrier region may not be closely nested inside a critical region.
 * If it were allowed, a thread holding the critical lock would wait at the
 * barrier for threads that are themselves blocked waiting to enter the
 * critical region — a guaranteed deadlock. Do NOT "fix" this file; it
 * exists to demonstrate the restriction. */
void wrong5(int n)
{
#pragma omp parallel
{
#pragma omp critical
{
work(n, 0);
/* incorrect nesting of barrier region in a critical region */
#pragma omp barrier
work(n, 1);
}
}
}
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /* Re-order the user-supplied affine arguments sx,ry,rx,sy,tx,ty into the
     internal coefficient order c0,c2,c4,c1,c3,c5 in place; slots 0 (sx) and
     5 (ty) keep their positions. */
  double
    ry = affine[1],
    rx = affine[2],
    sy = affine[3],
    tx = affine[4];

  affine[1]=rx;
  affine[2]=tx;
  affine[3]=ry;
  affine[4]=sy;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* Inverse of AffineArgsToCoefficients: re-order the internal coefficients
     c0,c1,c2,c3,c4,c5 back into the external sx,ry,rx,sy,tx,ty order in
     place; slots 0 and 5 keep their positions. */
  double
    c1 = coeff[1],
    c2 = coeff[2],
    c3 = coeff[3],
    c4 = coeff[4];

  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* Invert a forward affine mapping given as coefficients c0..c5
     (see "Digital Image Warping" by George Wolberg, page 50).
     PerceptibleReciprocal() guards against a singular (zero-determinant)
     matrix by returning a huge-but-finite reciprocal. */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  /* 2x2 linear part: adjugate scaled by 1/det */
  inverse[0]=det*coeff[4];
  inverse[3]=det*(-coeff[3]);
  inverse[1]=det*(-coeff[1]);
  inverse[4]=det*coeff[0];
  /* translation terms */
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[5]=det*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* Invert a forward perspective mapping given as coefficients c0..c7
     (see "Digital Image Warping" by George Wolberg, page 53).
     PerceptibleReciprocal() guards against a singular matrix. */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  /* cofactor expansion of the 3x3 homogeneous matrix, scaled by 1/det */
  inverse[0]=det*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=det*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=det*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=det*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=det*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=det*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=det*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2 number_valuesal polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /* Return the number of terms for a 2d polynomial of the given order.
     Valid orders are the integers 1..5, or 1.5 for the special bilinear
     polynomial; anything else returns 0 (invalid).
     BUG FIX: the original test `(order-1.5) > MagickEpsilon` never rejected
     non-integer orders *below* 1.5 (e.g. 1.2 was accepted and produced a
     meaningless term count); use the absolute distance from 1.5 instead. */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  /* triangular number of 2D monomials up to the given total degree */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Return the value of the nth 2D polynomial basis term at (x,y).
     Terms are ordered: constant; affine (x,y); bilinear (xy); quadratic
     (x^2,y^2); then full cubic..quintic monomials from x^N to y^N. */
  static const int
    px[21] = { 0,1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 },
    py[21] = { 0,0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20))
    return( 0 ); /* should never happen */
  /* build x^px[n] * y^py[n] by repeated multiplication (left to right) */
  product=1.0;
  for (k=0; k < px[n]; k++)
    product*=x;
  for (k=0; k < py[n]; k++)
    product*=y;
  return(product);
}
static const char *poly_basis_str(ssize_t n)
{
  /* Return the printable form of the nth polynomial basis term (matching
     poly_basis_fn), with 'ii'/'jj' standing in for the x/y variables. */
  static const char
    *terms[21] =
    {
      "",                                                /* constant  */
      "*ii", "*jj",                                      /* affine    */
      "*ii*jj",                                          /* bilinear  */
      "*ii*ii", "*jj*jj",                                /* quadratic */
      "*ii*ii*ii", "*ii*ii*jj", "*ii*jj*jj", "*jj*jj*jj",/* cubic     */
      "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
      "*ii*jj*jj*jj", "*jj*jj*jj*jj",                    /* quartic   */
      "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
      "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj", "*jj*jj*jj*jj*jj" /* quintic */
    };

  if ((n < 0) || (n > 20))
    return( "UNKNOWN" ); /* should never happen */
  return(terms[n]);
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* Return the x-derivative monomial of the nth polynomial basis term,
     WITHOUT the integer derivative coefficient (d/dx x^2 reports x, not 2x);
     callers absorb the constant factor into the fitted coefficients. */
  static const int
    px[21] = { 0,1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 },
    py[21] = { 0,0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20))
    return( 0.0 ); /* should never happen */
  if (px[n] == 0)
    return( 0.0 ); /* term is constant in x */
  /* x^(px[n]-1) * y^py[n], multiplied left to right */
  product=1.0;
  for (k=1; k < px[n]; k++)
    product*=x;
  for (k=0; k < py[n]; k++)
    product*=y;
  return(product);
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* Return the y-derivative monomial of the nth polynomial basis term,
     WITHOUT the integer derivative coefficient (mirror of poly_basis_dx).
     The original implementation deferred to poly_basis_dx(n-1) for n >= 6
     ("weird but true" — a coincidence of the term ordering); the direct
     x^px * y^(py-1) form below is term-for-term identical. */
  static const int
    px[21] = { 0,1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 },
    py[21] = { 0,0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20))
    return( 0.0 ); /* should never happen */
  if (py[n] == 0)
    return( 0.0 ); /* term is constant in y */
  /* x^px[n] * y^(py[n]-1), multiplied left to right */
  product=1.0;
  for (k=0; k < px[n]; k++)
    product*=x;
  for (k=1; k < py[n]; k++)
    product*=y;
  return(product);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    coefficients[6];

  Image
    *affine_image;

  /*
    Thin wrapper: forward the affine matrix to the generalized
    distortion engine via AffineProjectionDistortion.
  */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* pack the matrix in the sx,rx,ry,sy,tx,ty order DistortImage expects */
  coefficients[0]=affine_matrix->sx;
  coefficients[1]=affine_matrix->rx;
  coefficients[2]=affine_matrix->ry;
  coefficients[3]=affine_matrix->sy;
  coefficients[4]=affine_matrix->tx;
  coefficients[5]=affine_matrix->ty;
  affine_image=DistortImage(image,AffineProjectionDistortion,6,coefficients,
    MagickTrue,exception);
  return(affine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usally r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortMethod *method,const size_t number_arguments,const double *arguments,
size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using a EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"
  Image
    *resize_image,
    *tmp_image;
  RectangleInfo
    crop_area;
  double
    distort_args[12];
  VirtualPixelMethod
    vp_save;
  /*
    Distort resize image: perform the resize as an affine distortion
    (EWA cylindrical resampling), then crop away virtual-pixel effects.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  (void) memset(distort_args,0,sizeof(distort_args));
  /* Affine CP pairs: (0,0)->(0,0) and (src_w,src_h)->(dst_w,dst_h) */
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  /* Transparent virtual pixels so area-resampling beyond edges adds no color */
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it
        to mask out the virtual-pixel contribution.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency, so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
        First distort the alpha channel on its own...
      */
      Image
        *resize_alpha;
      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* ...then distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /* FIX: previously leaked resize_alpha on this failure path */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resized image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the distortion: crop to the exact requested
    size, discarding any virtual canvas/offset the distortion produced.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
PixelInfo
invalid; /* the color to assign when distort result is invalid */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
register ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method)
{
case AffineDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0],coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3],coeff[4],coeff[5]);
(void) FormatLocaleFile(stderr," %s' \\\n",lookup);
break;
}
case PerspectiveDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr,"Perspective Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort PerspectiveProjection \\\n '");
for (i=0; i < 4; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for ( ; i < 7; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[7]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%.1024s",image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n",
GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]);
(void) FormatLocaleFile(stderr,
" xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr,
" yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n",
coeff[8] < 0.0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
{
(void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4],coeff[5],coeff[6],coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5-
coeff[7]);
(void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if (coeff[9] != 0)
{
(void) FormatLocaleFile(stderr,
" rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4],
-coeff[0]);
(void) FormatLocaleFile(stderr,
" yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]);
}
else
(void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4],coeff[0]);
(void) FormatLocaleFile(stderr,
" xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0],
coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr," (rt < 0 ) ? red : %s'\n",
lookup);
else
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BilinearReverseDistortion:
{
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr,
" xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1],
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr,
" yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5],
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr,
"Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0],
(unsigned long) nterms);
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n yy =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr,"\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr,
" c%.20g = %+lf\n",(double) i,coeff[i]);
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1],
coeff[4]);
(void) FormatLocaleFile(stderr,
" yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]);
(void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1],coeff[7] );
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr,
"DePolar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n",
coeff[6],+coeff[4]);
(void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n",
coeff[7],+coeff[1]);
(void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n",
coeff[2]);
(void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n",
coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n",
coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1],
coeff[2] );
(void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
double
xc,
yc;
/*
NOTE: This does the barrel roll in pixel coords not image coords
The internal distortion must do it in image coordinates,
so that is what the center coeff (8,9) is given in.
*/
xc=((double)image->columns-1.0)/2.0+image->page.x;
yc=((double)image->rows-1.0)/2.0+image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]-
0.5,coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr,
" ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2],
coeff[3]);
(void) FormatLocaleFile(stderr,
" jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6],
coeff[7]);
(void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/*
The user provided a 'scale' expert option will scale the output image size,
by the factor given allowing for super-sampling of the distorted image
space. Any scaling factors must naturally be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid,
exception);
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel color to assign to distorted image */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,DistortImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. X axis. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
/*
Create a rotated copy of 'image'. Positive angles rotate
counter-clockwise. The angle is normalized into (-45,45] while the
whole 90-degree turns are counted separately; pure quarter turns are
handled losslessly by IntegralRotateImage(), everything else goes
through the general distortion engine.

image: source image (unchanged).
degrees: rotation angle in degrees.
exception: receives errors/warnings.
Returns a new image (caller owns it), or NULL on failure.

FIX(review): the DistortImage() call previously read "°rees" — a
mangled "&degrees" (the "&deg" prefix had been corrupted into the
degree sign) — which does not compile. Restored "&degrees".
*/
Image
*clone_image,
*rotated;
double
theta;
PointInfo
shear;
size_t
quadrants;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* reduce the angle to (-45,45] plus a count of 90-degree turns */
theta=fmod(degrees,360.0);
while (theta < -45.0)
theta+=360.0;
quadrants=0;
while (theta > 45.0)
{
theta-=90.0;
quadrants++;
}
quadrants%=4;
/* shear components are only used to detect a negligible residual angle */
shear.x=(-tan((double) DegreesToRadians(theta)/2.0));
shear.y=sin((double) DegreesToRadians(theta));
if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
return(IntegralRotateImage(image,quadrants,exception));
/* general case: clone, set background virtual pixels, distort by SRT */
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
exception);
rotated=DistortImage(clone_image,ScaleRotateTranslateDistortion,1,
&degrees,MagickTrue,exception);
clone_image=DestroyImage(clone_image);
return(rotated);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often simular to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficents, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
const SparseColorMethod method,const size_t number_arguments,
const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
SparseColorMethod
sparse_method;
double
*coeff;
Image
*sparse_image;
size_t
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Determine number of color values needed per control point */
/* (one value per channel that is both updatable and active) */
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
number_colors++;
/*
Convert input arguments into mapping coefficients; in this case
we are mapping (distorting) colors, rather than coordinates.
*/
{ DistortMethod
distort_method;
distort_method=(DistortMethod) method;
if ( distort_method >= SentinelDistortion )
distort_method = ShepardsDistortion; /* Pretend to be Shepards */
coeff = GenerateCoefficients(image, &distort_method, number_arguments,
arguments, number_colors, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Note some Distort Methods may fall back to other simpler methods,
Currently the only fallback of concern is Bilinear to Affine
(Barycentric), which is also a sparse_color method. This also ensures
correct two and one color Barycentric handling.
*/
sparse_method = (SparseColorMethod) distort_method;
if ( distort_method == ShepardsDistortion )
sparse_method = method; /* return non-distort methods to normal */
if ( sparse_method == InverseColorInterpolate )
coeff[0]=0.5; /* sqrt() the squared distance for inverse */
}
/* Verbose output: print the per-channel interpolation as -fx formulas
(only possible for the two methods with closed-form coefficients) */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
switch (sparse_method) {
case BarycentricColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
break;
}
default:
/* sparse color method is too complex for FX emulation */
break;
}
}
/* Generate new image for generated interpolated gradient.
* ASIDE: Actually we could have just replaced the colors of the original
* image, but IM Core policy, is if storage class could change then clone
* the image.
*/
sparse_image=CloneImage(image,0,0,MagickTrue,exception);
if (sparse_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
{ /* if image is ColorMapped - change it to DirectClass */
sparse_image=DestroyImage(sparse_image);
return((Image *) NULL);
}
{ /* ----- MAIN CODE ----- */
/* Rows are processed in parallel; each iteration owns its own cache
view row, so only 'progress' and 'status' are shared. */
CacheView
*sparse_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
j;
status=MagickTrue;
progress=0;
sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
for (j=0; j < (ssize_t) sparse_image->rows; j++)
{
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel to assign to distorted image */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
/* row fetch failed; flag failure but keep other rows running */
status=MagickFalse;
continue;
}
GetPixelInfo(sparse_image,&pixel);
/* NOTE(review): loop bound is image->columns while the row buffer was
fetched with sparse_image->columns; sparse_image is a clone of image
so the two agree here — confirm if cloning ever changes geometry. */
for (i=0; i < (ssize_t) image->columns; i++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (sparse_method)
{
case BarycentricColorInterpolate:
{
/* per-channel affine function of (i,j): 3 coefficients each */
register ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
break;
}
case BilinearColorInterpolate:
{
/* per-channel bilinear function of (i,j): 4 coefficients each */
register ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
break;
}
case InverseColorInterpolate:
case ShepardsColorInterpolate:
{ /* Inverse (Squared) Distance weights average (IDW) */
/* control points stride by 2 coordinates + number_colors values */
size_t
k;
double
denominator;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=0.0;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=0.0;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=0.0;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=0.0;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=0.0;
denominator = 0.0;
for(k=0; k<number_arguments; k+=2+number_colors) {
register ssize_t x=(ssize_t) k+2;
double weight =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
weight = pow(weight,coeff[0]); /* inverse of power factor */
/* clamp so a pixel on a control point gets full weight */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red += arguments[x++]*weight;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green += arguments[x++]*weight;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue += arguments[x++]*weight;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black += arguments[x++]*weight;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha += arguments[x++]*weight;
denominator += weight;
}
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red/=denominator;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green/=denominator;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue/=denominator;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black/=denominator;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha/=denominator;
break;
}
case ManhattanColorInterpolate:
{
/* nearest control point under L1 (taxicab) distance */
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
fabs((double)i-arguments[ k ])
+ fabs((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
case VoronoiColorInterpolate:
default:
{
/* nearest control point under squared Euclidean distance */
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for (k=0; k<number_arguments; k+=2+number_colors) {
double distance =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
}
/* set the color directly back into the source image */
/* (interpolated values are normalized; scale to quantum and clamp) */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha);
SetPixelViaPixelInfo(sparse_image,&pixel,q);
q+=GetPixelChannels(sparse_image);
}
sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* atomic: 'progress' is shared across the parallel rows */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SparseColorTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sparse_view=DestroyCacheView(sparse_view);
if (status == MagickFalse)
sparse_image=DestroyImage(sparse_image);
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(sparse_image);
}
|
jacobi.pluto.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define pmax(x,y) ((x) > (y)? (x) : (y))
#define pmin(x,y) ((x) < (y)? (x) : (y))
#include "smoothers.h"
#include <math.h>
// cannot be 8 or below
#ifndef TS
#define TS 16
#endif
#ifndef T3
#define T3 64
#endif
#ifndef T4
#define T4 256
#endif
//void jacobi( GRID &u, GRID &f, GRID &tmp, int nu ) {
/* Weighted-Jacobi smoother (nu sweeps) on a 3D 7-point stencil, as a
* Pluto-generated time-tiled loop nest. t5 is the time (sweep) index;
* even sweeps read 'a' (u) and write 'w' (tmp), odd sweeps read 'w' and
* write 'a' — a ping-pong selected by (t5 % 2). The tile-bound
* expressions are generated and must not be edited by hand.
* Returns 'count' (flop counter) — the increments are commented out,
* so as written this always returns 0.
* NOTE(review): assumes GRID stores a contiguous lda*lda*lda array
* reachable via p[0][0][0] with unit-stride innermost axis — confirm
* against smoothers.h. */
long long int jacobi( GRID &u, GRID &f, GRID &tmp, int nu ) {
int N = u.n;
int lda = u.lda;
int lda2 = u.lda * u.lda;
double hh = u.h*u.h;
double invhh = 1.0 / hh;
/* D^{-1} * omega for the weighted Jacobi update (omega = 8/9) */
double DinvXomega = hh/6.0 * 8.0/9.0;
double* w = &(tmp.p[0][0][0]);
double* a = &(u.p[0][0][0]);
double* b = &(f.p[0][0][0]);
long long int count = 0;
#ifdef USE_MM_ALLOC
__assume_aligned(w,64);
__assume_aligned(a,64);
__assume_aligned(b,64);
#endif
if ((N >= 1) && (nu >= 1)) {
/* t1/t2: time-skewed tile loops; t2 tiles are independent -> parallel */
for (int t1=-1;t1<=floord(nu-2,8);t1++) {
int lbp=pmax(ceild(t1,2),ceild(16*t1-nu+3,16));
int ubp=pmin(floord(nu+N-2,16),floord(8*t1+N+7,16));
#pragma omp parallel for
for (int t2=lbp;t2<=ubp;t2++) {
for (int t3=pmax(pmax(0,ceild(t1-1,2)),ceild(16*t2-N-14,16));t3<=pmin(pmin(floord(nu+N-2,16),floord(8*t1+N+15,16)),floord(16*t2+N+14,16));t3++) {
for (int t4=pmax(pmax(pmax(0,ceild(t1-124,125)),ceild(16*t2-N-998,1000)),ceild(16*t3-N-998,1000));t4<=pmin(pmin(pmin(pmin(floord(8*t1-8*t2+N+7,500),floord(nu+N-2,1000)),floord(8*t1+N+15,1000)),floord(16*t2+N+14,1000)),floord(16*t3+N+14,1000));t4++) {
/* t5: actual sweep number inside the tile */
for (int t5=pmax(pmax(pmax(pmax(pmax(0,8*t1),16*t2-N),16*t3-N),1000*t4-N),16*t1-16*t2+1);t5<=pmin(pmin(pmin(pmin(pmin(nu-2,8*t1+15),16*t2+14),16*t3+14),1000*t4+998),16*t1-16*t2+N+15);t5++) {
for (int t6=pmax(pmax(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=pmin(pmin(16*t2+15,t5+N),-16*t1+16*t2+2*t5);t6++) {
for (int t7=pmax(16*t3,t5+1);t7<=pmin(16*t3+15,t5+N);t7++) {
/* flattened linear index range for the innermost (vector) loop */
int lbv= (-t5+t6)*lda2 + (-t5+t7)*lda -t5+pmax(1000*t4,t5+1);
int ubv= (-t5+t6)*lda2 + (-t5+t7)*lda -t5+pmin(1000*t4+999,t5+N);
/* precomputed neighbor offsets: south/north (+-lda),
east/west (+-1), front/back (+-lda2) */
int ks = lbv-lda;
int kn = lbv+lda;
int ke = lbv-1;
int kw = lbv+1;
int kf = lbv-lda2;
int kb = lbv+lda2;
if (t5%2==0) {
/* even sweep: a -> w */
#pragma loop_count min(1),max(64),avg(32)
#pragma ivdep
#pragma vector always
for (int k=lbv;k<=ubv;k++) {
w[k] = a[k] - DinvXomega*((6.0*a[k]-a[kw]-a[ke]-a[kn]-a[ks]-a[kf]-a[kb])*invhh - b[k]);
kn++; ks++; ke++; kw++; kf++; kb++;
//count++;
}
} else {
/* odd sweep: w -> a */
#pragma loop_count min(1),max(64),avg(32)
#pragma ivdep
#pragma vector always
for (int k=lbv;k<=ubv;k++) {
a[k] = w[k] - DinvXomega*((6.0*w[k]-w[kw]-w[ke]-w[kn]-w[ks]-w[kf]-w[kb])*invhh - b[k]);
kn++; ks++; ke++; kw++; kf++; kb++;
// count++;
}
}
/*
lbv=pmax(1000*t4,t5+1);
ubv=pmin(1000*t4+999,t5+N);
#pragma ivdep
#pragma vector always
for (int t8=lbv;t8<=ubv;t8++) {
a[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (a[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] - (c * (((((((((6.0 * a[ t5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) - a[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) - a[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]) * invhh) - b[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)])));;
}*/
}
}
}
}
}
}
}
}
/* odd sweep count: final result lives in tmp, so swap the grids'
data pointers to leave the answer in u */
if (nu%2==1) {
double*** t = u.p;
u.p = tmp.p;
tmp.p = t;
}
return count;
}
|
GB_binop__lor_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_fp32)
// A*D function (colscale): GB (_AxD__lor_fp32)
// D*A function (rowscale): GB (_DxB__lor_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_fp32)
// C=scalar+B GB (_bind1st__lor_fp32)
// C=scalar+B' GB (_bind1st_tran__lor_fp32)
// C=A+scalar GB (_bind2nd__lor_fp32)
// C=A'+scalar GB (_bind2nd_tran__lor_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_FP32 || GxB_NO_LOR_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
/* Disabled in this generated file: LOR is not one of the accumulable
ops, so the dense C += A+B kernel is compiled out (name is "(none)"). */
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where all three matrices are dense; the loop body comes from
the shared template, with GB_BINOP = ((aij != 0) || (bij != 0)). */
void GB (_Cdense_ewise3_noaccum__lor_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate sparse/hyper B into dense C (subassign method 23).
Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE). */
GrB_Info GB (_Cdense_accumB__lor_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a single scalar into dense C (subassign method 22).
NOTE(review): the second "return (GrB_SUCCESS)" after the inner block
is unreachable — harmless artifact of the code generator. */
GrB_Info GB (_Cdense_accumb__lor_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: scale each column of A by the matching diagonal entry of D. */
GrB_Info GB (_AxD__lor_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: scale each row of B by the matching diagonal entry of D. */
GrB_Info GB (_DxB__lor_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd C=A+B (optionally masked): union of patterns; alpha/beta
scalars fill the non-intersecting entries when is_eWiseUnion is true.
Workspace declared here is freed by GB_FREE_WORKSPACE after the
template runs. */
GrB_Info GB (_AaddB__lor_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult C=A.*B (method 08): intersection of patterns, C is
sparse/hyper; masked and complemented-mask variants included. */
GrB_Info GB (_AemultB_08__lor_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult C=A.*B (method 02): A sparse/hyper, B bitmap/full.
LOR is commutative (GB_BINOP_FLIP is 0 for this operator), so the
flipxy argument never needs a flipped template instantiation here;
the GB_BINOP_FLIP branch below is kept for generator uniformity. */
GrB_Info GB (_AemultB_02__lor_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: C<M> = A.*B with the LOR fp32 operator, method 04
// (M sparse/hyper; A and B bitmap/full).  Work is in the included template.
GrB_Info GB (_AemultB_04__lor_fp32)
(
    GrB_Matrix C,                           // output matrix
    const GrB_Matrix M,                     // mask (required for this method)
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: C=A.*B, C<M>=A.*B, or C<!M>=A.*B with the LOR fp32
// operator, where C is held as a bitmap.  Work is in the included template.
GrB_Info GB (_AemultB_bitmap__lor_fp32)
(
    GrB_Matrix C,                           // output matrix (bitmap)
    const int ewise_method,
    const GrB_Matrix M,                     // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = (x || b) entrywise with the scalar x bound to the first
// argument: Cx [k] = (x != 0) || (Bx [k] != 0).  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__lor_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    float x = (*((float *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // operate only on entries present in the bitmap of B
        if (GBB (Bb, k))
        {
            float b_k = GBX (Bx, k, false) ;
            Cx [k] = ((x != 0) || (b_k != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = (a || y) entrywise with the scalar y bound to the second
// argument: Cx [k] = (Ax [k] != 0) || (y != 0).  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__lor_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // operate only on entries present in the bitmap of A
        if (GBB (Ab, k))
        {
            float a_k = GBX (Ax, k, false) ;
            Cx [k] = ((a_k != 0) || (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// Generated kernel: C = op (x, A') -- transpose A and apply the LOR fp32
// operator with scalar x bound first; the per-entry work is GB_CAST_OP
// (defined just above) and the transpose machinery is the included template.
GrB_Info GB (_bind1st_tran__lor_fp32)
(
    GrB_Matrix C,                       // output: C = op (x, A')
    const GB_void *x_input,             // scalar bound to the 1st argument
    const GrB_Matrix A,                 // input matrix, used transposed
    int64_t *restrict *Workspaces,      // workspaces for the transpose
    const int64_t *restrict A_slice,    // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for templates included later in this generated file
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// Generated kernel: C = op (A', y) -- transpose A and apply the LOR fp32
// operator with scalar y bound second; per-entry work is GB_CAST_OP
// (defined just above) inside the included transpose template.
GrB_Info GB (_bind2nd_tran__lor_fp32)
(
    GrB_Matrix C,                       // output: C = op (A', y)
    const GrB_Matrix A,                 // input matrix, used transposed
    const GB_void *y_input,             // scalar bound to the 2nd argument
    int64_t *restrict *Workspaces,      // workspaces for the transpose
    const int64_t *restrict A_slice,    // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
randomkit.c | /* MPY Random kit 1.0 */
/* static char const rcsid[] =
"@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <assert.h>
#include <mkl_vsl.h>
#ifdef _WIN32
/*
* Windows
* XXX: we have to use this ugly defined(__GNUC__) because it is not easy to
* detect the compiler used in distutils itself
*/
#if (defined(__GNUC__) && defined(NPY_NEEDS_MINGW_TIME_WORKAROUND))
/*
* FIXME: ideally, we should set this to the real version of MSVCRT. We need
* something higher than 0x601 to enable _ftime64 and co
*/
#define __MSVCRT_VERSION__ 0x0700
#include <time.h>
#include <sys/timeb.h>
/*
* mingw msvcr lib import wrongly export _ftime, which does not exist in the
* actual msvc runtime for version >= 8; we make it an alias to _ftime64, which
* is available in those versions of the runtime
*/
#define _FTIME(x) _ftime64((x))
#else
#include <time.h>
#include <sys/timeb.h>
#define _FTIME(x) _ftime((x))
#endif
#ifndef RK_NO_WINCRYPT
/* Windows crypto */
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0400
#endif
#include <windows.h>
#include <wincrypt.h>
#endif
#else
/* Unix */
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#endif
/*
* Do not move this include. randomkit.h must be included
* after windows timeb.h is included.
*/
#include "randomkit.h"
#define BRNG VSL_BRNG_MT2203
#ifndef RK_DEV_URANDOM
#define RK_DEV_URANDOM "/dev/urandom"
#endif
#ifndef RK_DEV_RANDOM
#define RK_DEV_RANDOM "/dev/random"
#endif
/* Human-readable messages indexed by rk_error codes.
 * Fix: "unvavailable" -> "unavailable" (typo in the user-facing message). */
char *rk_strerror[RK_ERR_MAX] =
{
    "no error",
    "random device unavailable"
};
/* Initialize the RNG state: record the device count and clear every
 * per-device stream slot.  Streams are created later by rk_seed().
 * NOTE(review): assumes ndevice <= capacity of state->rng_streams --
 * confirm against the rk_state declaration in randomkit.h.
 * (Removed an unused local VSLStreamStatePtr.) */
void
rk_init(rk_state *state, int ndevice)
{
    int i;
    state->num_device = ndevice;
    for (i = 0; i < ndevice; ++i) {
        state->rng_streams[i] = NULL;
    }
    //rk_randomseed(state);
}
/* Tear down the RNG state: delete the VSL stream on every device and
 * NULL out the table entries so the state can be re-seeded or freed. */
void
rk_clean(rk_state *state)
{
    int i;
    VSLStreamStatePtr stream;
    for (i = 0; i < state->num_device; ++i) {
        stream = state->rng_streams[i];
        // the stream handle lives on device i, so deletion is offloaded there
        #pragma omp target device(i) map(to: stream)
        vslDeleteStream(&stream);
        state->rng_streams[i] = NULL;
    }
}
/* static functions */
static unsigned long rk_hash(unsigned long key);
/* (Re)seed every device's MT2203 stream with the low 32 bits of `seed`.
 * Any existing stream on a device is deleted first, then a fresh one is
 * created on that device and the host-side handle table is updated. */
void
rk_seed(unsigned long seed, rk_state *state)
{
    int pos, i;  // NOTE(review): `pos` is unused in this function
    seed &= 0xffffffffUL;
    VSLStreamStatePtr stream;
    for (i = 0; i < state->num_device; ++i) {
        stream = state->rng_streams[i];
        // create/replace the stream on device i; handle travels both ways
        #pragma omp target device(i) map(to:seed) map(tofrom: stream)
        {
            if (stream != NULL) {
                vslDeleteStream(&stream);
            }
            vslNewStream(&stream, BRNG, seed);
        }
        state->rng_streams[i] = stream;
    }
}
/* Thomas Wang 32 bits integer hash function */
static unsigned long
rk_hash(unsigned long key)
{
    /* fixed add/xor/shift mixing sequence; order of steps matters */
    unsigned long h = key;
    h += ~(h << 15);
    h ^= h >> 10;
    h += h << 3;
    h ^= h >> 6;
    h += ~(h << 11);
    h ^= h >> 16;
    return h;
}
/* Seed the state from the OS entropy device when available; otherwise fall
 * back to hashing wall-clock time, pid, and CPU clock together.  Returns
 * RK_NOERR when device entropy was used, RK_ENODEV otherwise (the state is
 * still seeded in the fallback case). */
rk_error
rk_randomseed(rk_state *state)
{
#ifndef _WIN32
    struct timeval tv;
#else
    struct _timeb tv;
#endif
    int i;  // NOTE(review): `i` is unused in this function
    unsigned long buffer;
    if (rk_devfill(&buffer, sizeof(unsigned long), 0) == RK_NOERR) {
        /* ensures non-zero key */
        buffer |= 0x80000000UL;
        buffer &= 0xffffffffUL;
        rk_seed(buffer, state);
        return RK_NOERR;
    }
    // no entropy device: combine independent clock sources through rk_hash
#ifndef _WIN32
    gettimeofday(&tv, NULL);
    rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec)
            ^ rk_hash(clock()), state);
#else
    _FTIME(&tv);
    rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state);
#endif
    return RK_ENODEV;
}
/*void
rk_fill(void *buffer, size_t size, rk_state *state)
{
}
*/
/* Fill `buffer` with `size` random bytes from an OS entropy source.
 * strong != 0 selects the blocking device (RK_DEV_RANDOM), otherwise
 * RK_DEV_URANDOM; on Windows the CryptoAPI is used instead.  Returns
 * RK_NOERR on success, RK_ENODEV if no device could be read. */
rk_error
rk_devfill(void *buffer, size_t size, int strong)
{
#ifndef _WIN32
    FILE *rfile;
    int done;
    if (strong) {
        rfile = fopen(RK_DEV_RANDOM, "rb");
    }
    else {
        rfile = fopen(RK_DEV_URANDOM, "rb");
    }
    if (rfile == NULL) {
        return RK_ENODEV;
    }
    // one element of `size` bytes: done != 0 means the full buffer was read
    done = fread(buffer, size, 1, rfile);
    fclose(rfile);
    if (done) {
        return RK_NOERR;
    }
#else
#ifndef RK_NO_WINCRYPT
    HCRYPTPROV hCryptProv;
    BOOL done;
    if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL,
            CRYPT_VERIFYCONTEXT) || !hCryptProv) {
        return RK_ENODEV;
    }
    done = CryptGenRandom(hCryptProv, size, (unsigned char *)buffer);
    CryptReleaseContext(hCryptProv, 0);
    if (done) {
        return RK_NOERR;
    }
#endif
#endif
    return RK_ENODEV;
} |
omp_report_mask.c | /* Routine reports OpenMP process affinity information.
Get thread number and cpus (cpu_ids)
Create static space (proc_mask) to hold all masks (done in a single region)
Determine the mask for each thread (insert it in proc_mask)
print mask header (one thread in single region)
print mask (one thread in single region)
free spaces
return
Removed check: if(omp_get_num_procs() != ncpus){ on 2019-06-14
*/
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include "opts.h"
void print_mask(int hd_prnt, char* name, int multi_node, int rank, int thrd, int ncpus, int nranks, int nthrds, int *proc_mask, int tpc, char v);
int boundto(int* nelements_set, int* int_mask);
int get_threads_per_node();
// Print a per-thread CPU-affinity mask report for the current OpenMP team.
// Shared (static) state is filled inside `single` regions; every thread then
// records its own mask, and one thread prints the whole table.
// Fixes: `dummy` was passed to print_mask uninitialized (UB) -- now NULL;
// the nthrds>50 trailer read proc_mask[thrd] with thrd==nthrds after the
// print loop (one past the end) -- now uses row 0, which the trailer only
// needs as a valid placeholder.
void amask_omp(){
   static int ncpus, nthrds;
   int thrd;                 //Thread info
   int nel_set;
   static int ** proc_mask;  // proc_mask[thread][cpu] occupancy matrix
   int i,j, ierr;
   char * dummy = NULL;      // placeholder "name" argument for print_mask
   static char v,p;
   static int tpc;           // hwthreads/core
   Maskopts opts;
   thrd = omp_get_thread_num();
   #pragma omp single
   {
      // get print_speed fast or slow (f|c); listing cores or SMT (c|s)
      p = opts.get_p();
      v = opts.get_v();
      tpc = get_threads_per_node();
      nthrds = omp_get_num_threads();
      ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
   }
   // Uh, omp_get_num_procs doesn't return processors on device as advertised.
   // if numactl requests a subset of cpu-ids, it returns the count of bits set.
   // So-- removing this "invalid" check. 6/14/2019
   /*
   if(omp_get_num_procs() != ncpus){
      printf("ERROR: ncpus_by_omp=%d, ncpus_sched=%d\n",omp_get_num_procs(),ncpus);
      exit(1);
   }
   */
   #pragma omp single
   {
      proc_mask = (int **) malloc(sizeof(int*)*nthrds);
      for(i=0;i<nthrds;i++) proc_mask[i] = (int * ) malloc(sizeof(int)*ncpus );
      for(i=0;i<nthrds;i++) for(j=0;j<ncpus;j++) proc_mask[i][j] =0;
   }
   // every thread fills its own row (the single above has an implicit barrier)
   ierr = boundto(&nel_set,proc_mask[thrd]);
   #pragma omp barrier
   #pragma omp single
   {
      print_mask(1, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[thrd],tpc,v); //print header
      for(thrd=0;thrd<nthrds;thrd++){
         print_mask(0, dummy, 0, 0,thrd, ncpus, 1,nthrds, proc_mask[thrd],tpc,v);
         if(p == 's') ierr=usleep(300000);
      }
      if(nthrds>50)
         print_mask(2, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[0],tpc,v); //print header (proc_mask[0]: any valid row)
      for(i=0;i<nthrds;i++) free( proc_mask[i]);
      free( proc_mask);
   }
}
void amask_omp_(){ amask_omp(); }
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for `struct timeval` values.
 * NOTE: Y is normalized in place to perform the microsecond borrow/carry,
 * matching the classic glibc-manual idiom.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Normalize the other way when the microsecond gap exceeds a second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec of the result is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the time-skewed, tiled order-1 3D 7-point stencil.
 * Usage: prog Nx Ny Nz [Nt].  Runs TESTS sweeps and reports the best time.
 * Fix: Nx/Ny/Nz/Nt were read uninitialized (undefined behavior) when fewer
 * arguments were given; they now carry safe defaults. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults include the 2-cell halo, matching the argv path (+2). */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 32;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* A[2][Nz][Ny][Nx]: double-buffered time dimension. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 2048;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize the t=0 buffer with reproducible pseudo-random values
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* NOTE(review): the generator had inlined a glibc license header here;
     * it documented nothing about this code and has been trimmed. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code: PLUTO-generated time-skewed tiled loop nest.
 * t1 walks time tiles; t2 (parallelized) walks skewed z tiles; t3/t4 walk
 * y/x tiles; t5..t8 are the point loops with (-t5+...) unskewing. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) {
        for (t4=max(max(max(0,ceild(t1-511,512)),ceild(8*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(4*t1+Nx+5,2048)),floord(8*t2+Nx+4,2048)),floord(8*t3+Nx+4,2048)),floord(8*t1-8*t2+Nz+Nx+3,2048));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),2048*t4+2046),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                lbv=max(2048*t4,t5+1);
                ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
csymm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zsymm.c, normal z -> c, Fri Sep 28 17:38:02 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_symm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha \times A \times B + \beta \times C \f]
* or
* \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* where alpha and beta are scalars, A is a symmetric matrix and B and
* C are m-by-n matrices.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the symmetric matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the symmetric matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* symmetric matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* symmetric matrix A is to be referenced.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft,
* and is n otherwise. Only the uplo triangular part is referenced.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,ka).
*
* @param[in] pB
* B is an ldb-by-n matrix, where the leading m-by-n part of
* the array B must contain the matrix B.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* C is an ldc-by-n matrix.
* On exit, the array is overwritten by the m-by-n updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_csymm
* @sa plasma_csymm
* @sa plasma_dsymm
* @sa plasma_ssymm
*
******************************************************************************/
/* Blocking entry point for C = alpha*A*B + beta*C (or B*A), A symmetric.
 * Validates arguments, tiles the LAPACK-layout inputs, runs the async tile
 * kernel inside a parallel/master region, and copies C back.
 * Fix: the return values of plasma_sequence_init() / plasma_request_init()
 * were assigned but never checked; failures now release the descriptors
 * and propagate the error, matching the descriptor-creation error paths. */
int plasma_csymm(plasma_enum_t side, plasma_enum_t uplo,
                 int m, int n,
                 plasma_complex32_t alpha, plasma_complex32_t *pA, int lda,
                 plasma_complex32_t *pB, int ldb,
                 plasma_complex32_t beta, plasma_complex32_t *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((uplo != PlasmaLower) && (uplo != PlasmaUpper)) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    // A is ka-by-ka with ka = m (left) or n (right).
    int am;
    if (side == PlasmaLeft)
        am = m;
    else
        am = n;
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -9;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -12;
    }
    // quick return
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_symm(plasma, PlasmaComplexFloat, m, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        am, am, 0, 0, am, am, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&C);
        return retval;
    }
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&C);
        return retval;
    }
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);
        plasma_omp_cge2desc(pC, ldc, C, &sequence, &request);
        // Call the tile async function.
        plasma_omp_csymm(side, uplo,
                         alpha, A,
                                B,
                         beta,  C,
                         &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization
    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&C);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_symm
*
* Performs symmetric matrix multiplication.
* Non-blocking tile version of plasma_csymm().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the symmetric matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the symmetric matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* symmetric matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* symmetric matrix A is to be referenced.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
*******************************************************************************
*
* @sa plasma_csymm
* @sa plasma_omp_csymm
* @sa plasma_omp_dsymm
* @sa plasma_omp_ssymm
*
******************************************************************************/
/* Non-blocking tile variant of plasma_csymm(): validates arguments, then
 * enqueues the parallel symm on the given sequence/request.  Errors are
 * reported through plasma_request_fail(); the function returns void.
 * Fix (consistency): the A-descriptor check called plasma_request_fail()
 * before plasma_error(), opposite to the B and C checks -- now all three
 * report the error first, then fail the request. */
void plasma_omp_csymm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_complex32_t alpha, plasma_desc_t A,
                                                plasma_desc_t B,
                      plasma_complex32_t beta,  plasma_desc_t C,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((side != PlasmaLeft) &&
        (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((uplo != PlasmaLower) &&
        (uplo != PlasmaUpper)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (C.m == 0 || C.n == 0 || ((alpha == 0.0 || A.n == 0) && beta == 1.0))
        return;
    // Call the parallel function.
    plasma_pcsymm(side, uplo,
                  alpha, A,
                         B,
                  beta,  C,
                  sequence, request);
}
|
GB_msort_2.c | //------------------------------------------------------------------------------
// GB_msort_2: sort a 2-by-n list of integers, using A[0:1][ ] as the key
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// A parallel mergesort of an array of 2-by-n integers. Each key consists
// of two integers.
#include "GB_msort_2.h"
#include "LAGraph_internal.h"
//------------------------------------------------------------------------------
// GB_merge_sequential_2: merge two sorted lists via a single thread
//------------------------------------------------------------------------------
// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */
#if defined ( _OPENMP )
#include <omp.h>
#define GB_OPENMP_THREAD_ID omp_get_thread_num ( )
#define GB_OPENMP_MAX_THREADS omp_get_max_threads ( )
#define GB_OPENMP_GET_NUM_THREADS omp_get_num_threads ( )
#else
#define GB_OPENMP_THREAD_ID (0)
#define GB_OPENMP_MAX_THREADS (1)
#define GB_OPENMP_GET_NUM_THREADS (1)
#endif
// Single-threaded merge of two sorted 2-row lists:
// Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1],
// ordered by the 2-integer key compared with GB_lt_2.
static void GB_merge_sequential_2
(
    int64_t *restrict S_0,              // output of length nleft + nright
    int64_t *restrict S_1,
    const int64_t *restrict Left_0,     // left input of length nleft
    const int64_t *restrict Left_1,
    const int64_t nleft,
    const int64_t *restrict Right_0,    // right input of length nright
    const int64_t *restrict Right_1,
    const int64_t nright
)
{
    int64_t p = 0, pleft = 0, pright = 0 ;
    // interleave the two sorted inputs while neither is exhausted
    while (pleft < nleft && pright < nright)
    {
        if (GB_lt_2 (Left_0, Left_1, pleft, Right_0, Right_1, pright))
        {
            // S [p] = Left [pleft++]
            S_0 [p] = Left_0 [pleft] ;
            S_1 [p] = Left_1 [pleft] ;
            pleft++ ;
        }
        else
        {
            // S [p] = Right [pright++]
            S_0 [p] = Right_0 [pright] ;
            S_1 [p] = Right_1 [pright] ;
            pright++ ;
        }
        p++ ;
    }
    // bulk-copy whichever input still has entries into the tail of S
    if (pleft < nleft)
    {
        int64_t leftover = (nleft - pleft) ;
        memcpy (S_0 + p, Left_0 + pleft, leftover * sizeof (int64_t)) ;
        memcpy (S_1 + p, Left_1 + pleft, leftover * sizeof (int64_t)) ;
    }
    else if (pright < nright)
    {
        int64_t leftover = (nright - pright) ;
        memcpy (S_0 + p, Right_0 + pright, leftover * sizeof (int64_t)) ;
        memcpy (S_1 + p, Right_1 + pright, leftover * sizeof (int64_t)) ;
    }
}
//------------------------------------------------------------------------------
// GB_merge_parallel_2: parallel merge
//------------------------------------------------------------------------------
// The two input arrays, Bigger [0..nbigger-1] and Smaller [0..nsmaller-1], are
// sorted. They are merged into the output array S [0..nleft+nright-1], using
// a parallel merge. nbigger >= nsmaller always holds.
void GB_merge_parallel_2 // parallel merge
(
int64_t *restrict S_0, // output of length nbigger + nsmaller
int64_t *restrict S_1,
const int64_t *restrict Bigger_0, // Bigger [0..nbigger-1]
const int64_t *restrict Bigger_1,
const int64_t nbigger,
const int64_t *restrict Smaller_0, // Smaller [0..nsmaller-1]
const int64_t *restrict Smaller_1,
const int64_t nsmaller
)
{
//--------------------------------------------------------------------------
// split the bigger input in half
//--------------------------------------------------------------------------
// The first task will handle Bigger [0..nhalf-1], and the second task
// will handle Bigger [nhalf..n-1].
int64_t nhalf = nbigger/2 ;
int64_t Pivot_0 [1] ; Pivot_0 [0] = Bigger_0 [nhalf] ;
int64_t Pivot_1 [1] ; Pivot_1 [0] = Bigger_1 [nhalf] ;
//--------------------------------------------------------------------------
// find where the Pivot appears in the smaller list
//--------------------------------------------------------------------------
// This is like GB_BINARY_TRIM_SEARCH, but applied to a 2-by-n array.
// binary search of Smaller [0..nsmaller-1] for the Pivot
// FIX: the search indices were declared "long", which is only 32 bits on
// LLP64 platforms (64-bit Windows) and would truncate nsmaller-1 for very
// large lists.  They are now int64_t, matching the list lengths.
int64_t pleft = 0, pright = nsmaller-1 ;
while (pleft < pright)
{
int64_t pmiddle = (pleft + pright) / 2 ;
if (GB_lt_2 (Smaller_0, Smaller_1, pmiddle, Pivot_0, Pivot_1, 0))
{
// if in the list, Pivot appears in [pmiddle+1..pright]
pleft = pmiddle + 1 ;
}
else
{
// if in the list, Pivot appears in [pleft..pmiddle]
pright = pmiddle ;
}
}
// binary search is narrowed down to a single item
// or it has found the list is empty:
ASSERT (pleft == pright || pleft == pright + 1) ;
// If found is true then Smaller [pleft == pright] == Pivot. If duplicates
// appear then Smaller [pleft] is any one of the entries equal to the Pivot
// in the list. If found is false then
// Smaller [original_pleft ... pleft-1] < Pivot and
// Smaller [pleft+1 ... original_pright] > Pivot holds.
// The value Smaller [pleft] may be either < or > Pivot.
bool found = (pleft == pright &&
Smaller_0 [pleft] == Pivot_0 [0] &&
Smaller_1 [pleft] == Pivot_1 [0]) ;
// Modify pleft and pright:
if (!found && (pleft == pright))
{
if (GB_lt_2 (Smaller_0, Smaller_1, pleft, Pivot_0, Pivot_1, 0))
{
pleft++ ;
}
else
{
pright++ ;
}
}
// Now the following conditions hold:
// If found is false then
// Smaller [original_pleft ... pleft-1] < Pivot and
// Smaller [pleft ... original_pright] > Pivot holds,
// and pleft-1 == pright
// If Smaller has no duplicates, then whether or not Pivot is found,
// Smaller [original_pleft ... pleft-1] < Pivot and
// Smaller [pleft ... original_pright] >= Pivot holds.
//--------------------------------------------------------------------------
// merge each part in parallel
//--------------------------------------------------------------------------
// The first task merges Bigger [0..nhalf-1] and Smaller [0..pleft-1] into
// the output S [0..nhalf+pleft-1]. The entries in Bigger [0..nhalf-1] are
// all < Pivot (if no duplicates appear in Bigger) or <= Pivot otherwise.
int64_t *restrict S_task0_0 = S_0 ;
int64_t *restrict S_task0_1 = S_1 ;
const int64_t *restrict Left_task0_0 = Bigger_0 ;
const int64_t *restrict Left_task0_1 = Bigger_1 ;
const int64_t nleft_task0 = nhalf ;
const int64_t *restrict Right_task0_0 = Smaller_0 ;
const int64_t *restrict Right_task0_1 = Smaller_1 ;
const int64_t nright_task0 = pleft ;
// The second task merges Bigger [nhalf..nbigger-1] and
// Smaller [pleft..nsmaller-1] into the output S [nhalf+pleft..n-1].
// The entries in Bigger [nhalf..nbigger-1] and Smaller [pleft..nsmaller-1]
// are all >= Pivot.
int64_t *restrict S_task1_0 = S_0 + nhalf + pleft ;
int64_t *restrict S_task1_1 = S_1 + nhalf + pleft ;
const int64_t *restrict Left_task1_0 = Bigger_0 + nhalf ;
const int64_t *restrict Left_task1_1 = Bigger_1 + nhalf ;
const int64_t nleft_task1 = (nbigger - nhalf) ;
const int64_t *restrict Right_task1_0 = Smaller_0 + pleft ;
const int64_t *restrict Right_task1_1 = Smaller_1 + pleft ;
const int64_t nright_task1 = (nsmaller - pleft) ;
// The two sub-merges write to disjoint halves of S, so the tasks are
// independent; taskwait below ensures both finish before returning.
#pragma omp task firstprivate(S_task0_0, S_task0_1, \
Left_task0_0, Left_task0_1, nleft_task0, \
Right_task0_0, Right_task0_1, nright_task0)
GB_merge_select_2 (S_task0_0, S_task0_1,
Left_task0_0, Left_task0_1, nleft_task0,
Right_task0_0, Right_task0_1, nright_task0) ;
#pragma omp task firstprivate(S_task1_0, S_task1_1, \
Left_task1_0, Left_task1_1, nleft_task1, \
Right_task1_0, Right_task1_1, nright_task1)
GB_merge_select_2 (S_task1_0, S_task1_1,
Left_task1_0, Left_task1_1, nleft_task1,
Right_task1_0, Right_task1_1, nright_task1) ;
#pragma omp taskwait
}
//------------------------------------------------------------------------------
// GB_merge_select_2: parallel or sequential merge
//------------------------------------------------------------------------------
// The two input arrays, Left [0..nleft-1] and Right [0..nright-1], are sorted.
// They are merged into the output array S [0..nleft+nright-1], using either
// the sequential merge (for small lists) or the parallel merge (for big
// lists).
void GB_merge_select_2 // parallel or sequential merge of 2-by-n arrays
(
int64_t *restrict S_0, // output of length nleft+nright
int64_t *restrict S_1,
const int64_t *restrict Left_0, // Left [0..nleft-1]
const int64_t *restrict Left_1,
const int64_t nleft,
const int64_t *restrict Right_0, // Right [0..nright-1]
const int64_t *restrict Right_1,
const int64_t nright
)
{
// Dispatcher: small merges are done sequentially; large merges recurse
// into the parallel merge, which requires its first list to be the
// longer of the two.
int64_t total = nleft + nright ;
if (total < GB_BASECASE)
{
// problem too small to justify tasking: merge sequentially
GB_merge_sequential_2 (S_0, S_1,
Left_0, Left_1, nleft,
Right_0, Right_1, nright) ;
return ;
}
if (nleft >= nright)
{
// Left is the bigger list: pass it first
GB_merge_parallel_2 (S_0, S_1,
Left_0, Left_1, nleft,
Right_0, Right_1, nright) ;
}
else
{
// Right is the bigger list: swap the argument order
GB_merge_parallel_2 (S_0, S_1,
Right_0, Right_1, nright,
Left_0, Left_1, nleft) ;
}
}
//------------------------------------------------------------------------------
// GB_mergesort_2: parallel merge sort of a 2-by-n array
//------------------------------------------------------------------------------
// GB_mergesort_2 sorts an int64_t array A of size 2-by-n in ascending
// order, using a parallel mergesort. W is a workspace array of size 2-by-n.
// Small arrays are sorted with a quicksort method.
void GB_mergesort_2 // sort array A of size 2-by-n, using 2 keys (A [0:1][])
(
int64_t *restrict A_0, // size n array
int64_t *restrict A_1, // size n array
int64_t *restrict W_0, // size n array, workspace
int64_t *restrict W_1, // size n array, workspace
const int64_t n
)
{
// NOTE(review): this function spawns OpenMP tasks, so it must be called
// from inside an active parallel region (see GB_msort_2) — confirm.
if (n <= GB_BASECASE)
{
// ---------------------------------------------------------------------
// sequential quicksort; no workspace needed
// ---------------------------------------------------------------------
GB_qsort_2 (A_0, A_1, n) ;
}
else
{
// ---------------------------------------------------------------------
// recursive merge sort if A has length greater than GB_BASECASE
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// split A into four quarters
// ---------------------------------------------------------------------
int64_t n12 = n / 2 ; // split n into n12 and n34
int64_t n34 = n - n12 ;
int64_t n1 = n12 / 2 ; // split n12 into n1 and n2
int64_t n2 = n12 - n1 ;
int64_t n3 = n34 / 2 ; // split n34 into n3 and n4
int64_t n4 = n34 - n3 ;
int64_t n123 = n12 + n3 ; // start of 4th quarter = n1 + n2 + n3
// 1st quarter of A and W
int64_t *restrict A_1st0 = A_0 ;
int64_t *restrict A_1st1 = A_1 ;
int64_t *restrict W_1st0 = W_0 ;
int64_t *restrict W_1st1 = W_1 ;
// 2nd quarter of A and W
int64_t *restrict A_2nd0 = A_0 + n1 ;
int64_t *restrict A_2nd1 = A_1 + n1 ;
int64_t *restrict W_2nd0 = W_0 + n1 ;
int64_t *restrict W_2nd1 = W_1 + n1 ;
// 3rd quarter of A and W
int64_t *restrict A_3rd0 = A_0 + n12 ;
int64_t *restrict A_3rd1 = A_1 + n12 ;
int64_t *restrict W_3rd0 = W_0 + n12 ;
int64_t *restrict W_3rd1 = W_1 + n12 ;
// 4th quarter of A and W
int64_t *restrict A_4th0 = A_0 + n123 ;
int64_t *restrict A_4th1 = A_1 + n123 ;
int64_t *restrict W_4th0 = W_0 + n123 ;
int64_t *restrict W_4th1 = W_1 + n123 ;
// ---------------------------------------------------------------------
// sort each quarter of A in parallel, using W as workspace
// ---------------------------------------------------------------------
// The four quarters are disjoint slices of A and W, so the four tasks
// are independent of each other.
#pragma omp task \
firstprivate(A_1st0, A_1st1, W_1st0, W_1st1, n1)
GB_mergesort_2 (A_1st0, A_1st1, W_1st0, W_1st1, n1) ;
#pragma omp task \
firstprivate(A_2nd0, A_2nd1, W_2nd0, W_2nd1, n2)
GB_mergesort_2 (A_2nd0, A_2nd1, W_2nd0, W_2nd1, n2) ;
#pragma omp task \
firstprivate(A_3rd0, A_3rd1, W_3rd0, W_3rd1, n3)
GB_mergesort_2 (A_3rd0, A_3rd1, W_3rd0, W_3rd1, n3) ;
#pragma omp task \
firstprivate(A_4th0, A_4th1, W_4th0, W_4th1, n4)
GB_mergesort_2 (A_4th0, A_4th1, W_4th0, W_4th1, n4) ;
// all four quarters must be sorted before the pairwise merges below
#pragma omp taskwait
// ---------------------------------------------------------------------
// merge pairs of quarters of A into two halves of W, in parallel
// ---------------------------------------------------------------------
#pragma omp task firstprivate( \
W_1st0, W_1st1, A_1st0, A_1st1, n1, A_2nd0, A_2nd1, n2)
GB_merge_select_2 (
W_1st0, W_1st1, A_1st0, A_1st1, n1, A_2nd0, A_2nd1, n2) ;
#pragma omp task firstprivate( \
W_3rd0, W_3rd1, A_3rd0, A_3rd1, n3, A_4th0, A_4th1, n4)
GB_merge_select_2 (
W_3rd0, W_3rd1, A_3rd0, A_3rd1, n3, A_4th0, A_4th1, n4) ;
#pragma omp taskwait
// ---------------------------------------------------------------------
// merge the two halves of W into A
// ---------------------------------------------------------------------
GB_merge_select_2 (A_0, A_1, W_1st0, W_1st1, n12, W_3rd0, W_3rd1, n34) ;
}
}
//------------------------------------------------------------------------------
// GB_msort_2: gateway for parallel merge sort
//------------------------------------------------------------------------------
void GB_msort_2 // sort array A of size 2-by-n, using 2 keys (A [0:1][])
(
int64_t *restrict A_0, // size n array
int64_t *restrict A_1, // size n array
int64_t *restrict W_0, // size n array, workspace
int64_t *restrict W_1, // size n array, workspace
const int64_t n,
const int nthreads // # of threads to use
)
{
// Gateway: GB_mergesort_2 uses OpenMP tasks, which need an enclosing
// parallel region.  Either reuse the caller's region or create one here.
// Note that nthreads is only consulted when a new region is created.
if (GB_OPENMP_GET_NUM_THREADS > 1)
{
// ---------------------------------------------------------------------
// parallel mergesort: already in parallel region
// ---------------------------------------------------------------------
// GB_msort_2 is already in a parallel region in the caller. This
// does not occur inside GraphBLAS, but the user application might be
// calling GraphBLAS inside its own parallel region.
GB_mergesort_2 (A_0, A_1, W_0, W_1, n) ;
}
else
{
// ---------------------------------------------------------------------
// parallel mergesort: start a parallel region
// ---------------------------------------------------------------------
// Only the master thread starts the recursion; the tasks it spawns
// are then executed by the whole team.
#pragma omp parallel num_threads(nthreads)
#pragma omp master
GB_mergesort_2 (A_0, A_1, W_0, W_1, n) ;
}
}
|
debug_task_shared.c | // This testcase checks emission of debug info for variables
// inside shared clause of task construct.
// REQUIRES: x86_64-linux
// RUN: %clang_cc1 -debug-info-kind=constructor -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK
// RUN: %clang_cc1 -debug-info-kind=constructor -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG
// expected-no-diagnostics
// CHECK-LABEL: define internal i32 @.omp_task_entry.
// CHECK-DAG: [[CONTEXT:%[0-9]+]] = load %struct.anon*, %struct.anon** %__context.addr.i, align 8
// CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE2:![0-9]+]], metadata !DIExpression(DW_OP_deref))
// CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE3:![0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
// CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE1:![0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_deref))
// CHECK-DAG: [[SHARE2]] = !DILocalVariable(name: "share2"
// CHECK-DAG: [[SHARE3]] = !DILocalVariable(name: "share3"
// CHECK-DAG: [[SHARE1]] = !DILocalVariable(name: "share1"
// NEG-LABEL: define internal i32 @.omp_task_entry.
// NEG: [[CONTEXT:%[0-9]+]] = load %struct.anon*, %struct.anon** %__context.addr.i, align 8
// NEG-NOT: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata {{![0-9]+}}, metadata !DIExpression(DW_OP_deref))
extern int printf(const char *, ...);
// foo: recursive helper whose OpenMP task mixes shared, private, and
// firstprivate variables; the FileCheck patterns above verify the debug
// metadata emitted for the shared ones.  Code must stay in sync with the
// CHECK lines — do not restructure.
int foo(int n) {
int share1 = 9, share2 = 11, share3 = 13, priv1, priv2, fpriv;
fpriv = n + 4;
if (n < 2)
return n;
else {
#if SHARED
#pragma omp task shared(share1, share2) private(priv1, priv2) firstprivate(fpriv) shared(share3)
#else
#pragma omp task private(priv1, priv2) firstprivate(fpriv)
#endif
{
// task body: reads and writes the shared variables (and recurses)
priv1 = n;
priv2 = n + 2;
share2 += share3;
printf("share1 = %d, share2 = %d, share3 = %d\n", share1, share2, share3);
share1 = priv1 + priv2 + fpriv + foo(n - 1) + share2 + share3;
}
// wait for the child task so the updated shared values are visible
#pragma omp taskwait
return share1 + share2 + share3;
}
}
// driver: exercises foo so the task outlining above is actually emitted
int main() {
int n = 10;
printf("foo(%d) = %d\n", n, foo(n));
return 0;
}
|
mafillpmain.c | /* CalculiX - A 3-dimensional finite element program */
/* Copyright (C) 1998-2015 Guido Dhondt */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU General Public License as */
/* published by the Free Software Foundation(version 2); */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <pthread.h>
#include "CalculiX.h"
static char *lakonf1;
static ITG num_cpus,*nef1,*ipnei1,*neifa1,*neiel1,*jq1,*irow1,*ielfa1,
*ifabou1,*neq1,*nzs1,*neij1;
static double *vfa1,*area1,*advfa1,*xlet1,*cosa1,*volume1,*au1=NULL,*ad1=NULL,
*ap1,*xle1,*b1=NULL,*xxn1,*hfa1,*gradpel1,*bp1,*xxi1,*xlen1,*cosb1;
/* Multithreaded driver for mafillp: decides the number of worker threads,
   gives each worker a private copy of the lhs/rhs arrays (ad1, au1, b1),
   runs mafillpmt on pthreads, then sums the per-thread copies back into
   ad, au and b.  Worker arguments are passed through the file-scope static
   pointers above because the pthread entry point takes a single argument. */
void mafillpmain(ITG *nef,char *lakonf,ITG *ipnei,
ITG *neifa,ITG *neiel,double *vfa,double *area,double *advfa,
double *xlet,double *cosa,double *volume,double *au,double *ad,
ITG *jq,ITG *irow,double *ap,ITG *ielfa,ITG *ifabou,
double *xle,double *b,double *xxn,ITG *neq,
ITG *nzs,double *hfa,double *gradpel,
double *bp,double *xxi,ITG *neij,double *xlen,double *cosb,
ITG *iatleastonepressurebc){
ITG i,j;
/* variables for multithreading procedure */
ITG sys_cpus,*ithread=NULL;
char *env,*envloc,*envsys;
num_cpus = 0;
sys_cpus=0;
/* explicit user declaration prevails */
envsys=getenv("NUMBER_OF_CPUS");
if(envsys){
sys_cpus=atoi(envsys);
if(sys_cpus<0) sys_cpus=0;
}
/* automatic detection of available number of processors */
if(sys_cpus==0){
sys_cpus = getSystemCPUs();
if(sys_cpus<1) sys_cpus=1;
}
/* local declaration prevails, if strictly positive */
envloc = getenv("CCX_NPROC_CFD");
if(envloc){
num_cpus=atoi(envloc);
if(num_cpus<0){
num_cpus=0;
}else if(num_cpus>sys_cpus){
num_cpus=sys_cpus;
}
}
/* else global declaration, if any, applies */
env = getenv("OMP_NUM_THREADS");
if(num_cpus==0){
if (env)
num_cpus = atoi(env);
if (num_cpus < 1) {
num_cpus=1;
}else if(num_cpus>sys_cpus){
num_cpus=sys_cpus;
}
}
// next line is to be inserted in a similar way for all other paralell parts
if(*nef<num_cpus) num_cpus=*nef;
/* NOTE(review): VLA on the stack; num_cpus is bounded by sys_cpus above */
pthread_t tid[num_cpus];
/* allocating fields for lhs and rhs matrix */
/* each thread gets its own full-size copy of ad, au and b */
NNEW(ad1,double,num_cpus**neq);
NNEW(au1,double,(long long)num_cpus**nzs);
NNEW(b1,double,num_cpus**neq);
/* calculating the stiffness and/or mass matrix
(symmetric part) */
/* stash the arguments in file-scope statics for mafillpmt;
   note the comma after xlet1=xlet is a harmless comma operator */
nef1=nef;lakonf1=lakonf;ipnei1=ipnei;neifa1=neifa;neiel1=neiel;
vfa1=vfa;area1=area;advfa1=advfa;xlet1=xlet,cosa1=cosa;volume1=volume;
jq1=jq;irow1=irow;ap1=ap;ielfa1=ielfa;ifabou1=ifabou;xle1=xle;
xxn1=xxn;neq1=neq;nzs1=nzs;hfa1=hfa;gradpel1=gradpel;bp1=bp;xxi1=xxi;
neij1=neij;xlen1=xlen;cosb1=cosb;
/* create threads and wait */
NNEW(ithread,ITG,num_cpus);
for(i=0; i<num_cpus; i++) {
ithread[i]=i;
pthread_create(&tid[i], NULL, (void *)mafillpmt, (void *)&ithread[i]);
}
for(i=0; i<num_cpus; i++) pthread_join(tid[i], NULL);
SFREE(ithread);
/* copying and accumulating the stiffnes and/or mass matrix */
/* reduce the per-thread copies: result = sum over threads j */
#pragma omp parallel \
default(none) \
shared(neq,ad,ad1,num_cpus,nzs,au,au1,b,b1) \
private(i,j)
{
#pragma omp for
for(i=0;i<*neq;i++){
ad[i]=ad1[i];
for(j=1;j<num_cpus;j++){
ad[i]+=ad1[i+j**neq];
}
}
#pragma omp for
for(i=0;i<*nzs;i++){
au[i]=au1[i];
for(j=1;j<num_cpus;j++){
au[i]+=au1[i+(long long)j**nzs];
}
}
#pragma omp for
for(i=0;i<*neq;i++){
b[i]=b1[i];
for(j=1;j<num_cpus;j++){
b[i]+=b1[i+j**neq];
}
}
}
SFREE(ad1);
SFREE(au1);
SFREE(b1);
FORTRAN(mafillpbc,(nef,au,ad,jq,irow,b,iatleastonepressurebc,nzs));
return;
}
/* subroutine for multithreading of mafillp */
/* pthread entry point: *i is the worker index (0..num_cpus-1).  Each worker
   writes into its own slice of the per-thread arrays au1/ad1/b1 and handles
   the 1-based element range [nefa, nefb] of the Fortran routine mafillp. */
void *mafillpmt(ITG *i){
ITG indexad,indexb,nefa,nefb,nefdelta;
long long indexau;
/* offsets of this worker's private copies within ad1, au1 and b1 */
indexad=*i**neq1;
indexau=(long long)*i**nzs1;
indexb=*i**neq1;
// ceil -> floor
nefdelta=(ITG)floor(*nef1/(double)num_cpus);
nefa=*i*nefdelta+1;
nefb=(*i+1)*nefdelta;
// next line! -> all parallel sections
/* the last worker picks up the remainder left by the floor division */
if((*i==num_cpus-1)&&(nefb<*nef1)) nefb=*nef1;
FORTRAN(mafillp,(nef1,lakonf1,ipnei1,neifa1,neiel1,vfa1,area1,
advfa1,xlet1,cosa1,volume1,&au1[indexau],&ad1[indexad],
jq1,irow1,ap1,ielfa1,ifabou1,xle1,&b1[indexb],xxn1,neq1,nzs1,
hfa1,gradpel1,bp1,xxi1,neij1,xlen1,cosb1,&nefa,&nefb));
return NULL;
}
|
GB_binop__isge_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint16)
// A*D function (colscale): GB (_AxD__isge_uint16)
// D*A function (rowscale): GB (_DxB__isge_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint16)
// C=scalar+B GB (_bind1st__isge_uint16)
// C=scalar+B' GB (_bind1st_tran__isge_uint16)
// C=A+scalar GB (_bind2nd__isge_uint16)
// C=A'+scalar GB (_bind2nd_tran__isge_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop lives in the template.
// (Auto-generated file: body intentionally untouched.)
void GB (_Cdense_ewise3_noaccum__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C (template-driven).
// Returns GrB_NO_VALUE when this operator is compiled out via GB_control.h.
GrB_Info GB (_Cdense_accumB__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into dense C (template-driven).
GrB_Info GB (_Cdense_accumb__isge_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable duplicate return kept as generated
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B with optional mask (template-driven).
GrB_Info GB (_AaddB__isge_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta scalars are only populated (and used) for eWiseUnion
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C sparse/hyper, general masked case (template-driven).
GrB_Info GB (_AemultB_08__isge_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A sparse/hyper, B bitmap/full.  GB_BINOP_FLIP is 0
// for this operator, so only the unflipped branch below is compiled.
GrB_Info GB (_AemultB_02__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: M sparse/hyper, A and B bitmap/full (template-driven).
GrB_Info GB (_AemultB_04__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with a bitmap-format result C (template-driven).
GrB_Info GB (_AemultB_bitmap__isge_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry present in B (bind1st).
GrB_Info GB (_bind1st__isge_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present according to the bitmap Bb
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry present in A (bind2nd).
GrB_Info GB (_bind2nd__isge_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present according to the bitmap Ab
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x >= aij).
GrB_Info GB (_bind1st_tran__isge_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A while applying cij = (aij >= y).
GrB_Info GB (_bind2nd_tran__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "matrix.h"
#include "utils.h"
/* Allocate the value buffer of an existing matrix struct and record its
 * dimensions.  Exits with a diagnostic if the allocation fails (CERT
 * MEM32-C: check allocation results), so callers always receive a usable
 * matrix.  A zero-sized matrix (width or height 0) is permitted.
 * Returns mat for call chaining. */
matrix* initMatrix(matrix* mat, int height, int width) {
    mat->vals = (netF*)malloc(sizeof(netF) * width * height);
    if ((mat->vals == NULL) && (width > 0) && (height > 0)) {
        fprintf(stderr, "Failed to allocate values for a %dx%d matrix.\n", height, width);
        exit(1);
    }
    mat->width = width;
    mat->height = height;
    return mat;
}
/* Heap-allocate a new height x width matrix.  The original passed an
 * unchecked malloc result straight into initMatrix, dereferencing NULL on
 * out-of-memory; now it exits with a diagnostic instead. */
matrix* createMatrix(int height, int width) {
    matrix* mat = (matrix*)malloc(sizeof(matrix));
    if (mat == NULL) {
        fprintf(stderr, "Failed to allocate a %dx%d matrix.\n", height, width);
        exit(1);
    }
    return initMatrix(mat, height, width);
}
/* Build a new height x width matrix with ones on the main diagonal and
 * zeros elsewhere. */
matrix* createIdentityMatrix(int height, int width) {
    matrix* fresh = createMatrix(height, width);
    return fillMatrixIdentity(fresh);
}
/* Build a new height x width matrix initialized from vals, which must hold
 * at least height*width entries in row-major order. */
matrix* createMatrixWithValues(int height, int width, netF* vals) {
    matrix* fresh = createMatrix(height, width);
    return setMatrixValues(fresh, vals);
}
/* Zero every entry of m.  Returns m for call chaining. */
matrix* fillMatrixZero(matrix* m) {
    const size_t numBytes = sizeof(netF) * m->width * m->height;
    memset(m->vals, 0, numBytes);
    return m;
}
/* Fill m with random values drawn from randomNetF(), rescaled from [0,1)
 * into (-1,1).  Returns m for call chaining. */
matrix* fillMatrixRandom(matrix* m) {
    const int total = m->width * m->height;
    int idx;
    for (idx = 0; idx < total; idx++) {
        m->vals[idx] = randomNetF() * 2 - 1;
    }
    return m;
}
/* Overwrite m with the identity pattern: zeros everywhere, ones on the
 * main diagonal (up to the shorter dimension for non-square matrices).
 * Returns m for call chaining. */
matrix* fillMatrixIdentity(matrix* m) {
    const int shorter = (m->height < m->width) ? m->height : m->width;
    int diag;
    fillMatrixZero(m);
    for (diag = 0; diag < shorter; diag++) {
        // entry (diag, diag) in row-major layout
        m->vals[diag * (m->width + 1)] = 1;
    }
    return m;
}
/* Release the value buffer of mat and mark the matrix as empty; the struct
 * itself is left for the caller (see deleteMatrix). */
void clearMatrix(matrix* mat) {
    free(mat->vals);
    mat->vals = NULL;
}
/* Free both the value buffer and the matrix struct itself.  Now a no-op for
 * NULL (mirroring free() semantics); the original dereferenced NULL inside
 * clearMatrix. */
void deleteMatrix(matrix* mat) {
    if (mat == NULL) {
        return;
    }
    clearMatrix(mat);
    free(mat);
}
/* Write row `row` of mat to stream: entries separated by colSep, with the
 * final entry followed by rowSep.  The original read vals[row*width] out of
 * bounds when width == 0; an empty row now emits just the row separator. */
void writeMatrixRow(FILE *stream, matrix* mat, char colSep, char rowSep, int row) {
    const int lastCol = mat->width - 1;
    int col;
    if (mat->width <= 0) {
        // keep the one-row-per-rowSep invariant without touching vals
        fprintf(stream, "%c", rowSep);
        return;
    }
    for (col = 0; col < lastCol; col++) {
        fprintf(stream, "%f%c", mat->vals[row * mat->width + col], colSep);
    }
    // col == lastCol here: last entry gets the row separator instead
    fprintf(stream, "%f%c", mat->vals[row * mat->width + col], rowSep);
}
/* Write every row of mat to stream via writeMatrixRow, then flush.
 * Returns mat for call chaining. */
matrix* writeMatrix(FILE *stream, matrix* mat, char colSep, char rowSep) {
    int r;
    for (r = 0; r < mat->height; r++) {
        writeMatrixRow(stream, mat, colSep, rowSep, r);
    }
    fflush(stream);
    return mat;
}
/* Dump mat to stdout: columns separated by spaces, one row per line. */
matrix* printMatrix(matrix* mat) {
    return writeMatrix(stdout, mat, ' ', '\n');
}
/* Store val at (row, col) if the indices are inside the matrix; silently
 * ignores out-of-range writes, matching the original contract.  The
 * original only checked the upper bounds, so negative indices wrote out of
 * bounds; they are now rejected too.  Returns mat for call chaining. */
matrix* setMatrixVal(matrix* mat, int row, int col, netF val) {
    if ((row >= 0) && (col >= 0) && (row < mat->height) && (col < mat->width)) {
        mat->vals[row * mat->width + col] = val;
    }
    return mat;
}
/* Copy width*height entries from dubs into mat's value buffer.
 * Returns mat for call chaining. */
matrix* setMatrixValues(matrix *mat, netF *dubs) {
    const size_t numBytes = sizeof(netF) * mat->width * mat->height;
    memcpy(mat->vals, dubs, numBytes);
    return mat;
}
/* Return a pointer to the entry at (row, col); no bounds checking. */
netF* getMatrixVal(matrix* mat, int row, int col) {
    return mat->vals + (mat->width * row + col);
}
/* Abort the process unless `supplied` is non-NULL and exactly height x width.
 * Used by the matrix operations to verify caller-provided result buffers. */
void validateSuppliedMatrix(matrix* supplied, int height, int width) {
    if (supplied == NULL) {
        PRINT_FLUSH(1, "Supplied matrix is NULL.");
        exit(1);
    }
    if ((supplied->height != height) || (supplied->width != width)) {
        PRINT_FLUSH(1, "Supplied matrix is of the wrong size %dx%d instead of %dx%d",
            supplied->height, supplied->width, height, width);
        exit(1);
    }
}
/* Naive row-major matrix product:
 * matVals (leftHeight x rightWidth) = leftVals (leftHeight x leftWidth)
 *                                   * rightVals (leftWidth x rightWidth). */
static __inline void innerMultiplyMatrices(const int leftHeight, const int leftWidth, const int rightWidth,
    const netF* leftVals, const netF* rightVals, netF* matVals) {
    for (int r = 0; r < leftHeight; r++) {
        const netF* leftRow = leftVals + r * leftWidth;
        for (int c = 0; c < rightWidth; c++) {
            netF acc = 0;
            for (int k = 0; k < leftWidth; k++) {
                acc += leftRow[k] * rightVals[k * rightWidth + c];
            }
            matVals[r * rightWidth + c] = acc;
        }
    }
}
/* result = left * right. Returns NULL when the inner dimensions disagree;
 * aborts (via validateSuppliedMatrix) when `result` has the wrong shape. */
matrix* multiplyMatrices(matrix* left, matrix* right, matrix* result) {
    if (left->width != right->height) {
        return NULL;
    }
    validateSuppliedMatrix(result, left->height, right->width);
    innerMultiplyMatrices(left->height, left->width, right->width,
        left->vals, right->vals, result->vals);
    return result;
}
/* Element-wise (Hadamard) product: result[i] = left[i] * right[i].
 * Returns NULL when the shapes differ. */
matrix* elementMultMatrices(matrix* left, matrix* right, matrix* result) {
    if ((left->width != right->width) || (left->height != right->height)) {
        return NULL;
    }
    validateSuppliedMatrix(result, left->height, left->width);
    const int count = left->width * left->height;
    for (int cell = 0; cell < count; cell++) {
        result->vals[cell] = left->vals[cell] * right->vals[cell];
    }
    return result;
}
/* Row-wise outer product: for each row r, the output row is the flattened
 * outer product of left's row and right's row, giving a
 * leftHeight x (leftWidth*rightWidth) matrix. Heights must match. */
matrix* expandMultMatrices(matrix* left, matrix* right, matrix* result) {
    if (left->height != right->height) {
        return NULL;
    }
    const int wide = left->width * right->width;
    validateSuppliedMatrix(result, left->height, wide);
    for (int r = 0; r < left->height; r++) {
        netF* out = result->vals + r * wide;
        const netF* lRow = left->vals + r * left->width;
        const netF* rRow = right->vals + r * right->width;
        for (int lc = 0; lc < left->width; lc++) {
            for (int rc = 0; rc < right->width; rc++) {
                out[lc * right->width + rc] = lRow[lc] * rRow[rc];
            }
        }
    }
    return result;
}
/* Mean over rows of the row-wise outer product: result is 1 x
 * (leftWidth*rightWidth), result[l*rw+r] = mean_rows(left[row][l] * right[row][r]).
 * Heights must match; returns NULL otherwise. */
matrix* expandMultCollapseMatrices(matrix* left, matrix *right, matrix* result) {
    if (left->height != right->height) {
        return NULL;
    }
    const int rows = left->height;
    const int wide = left->width * right->width;
    validateSuppliedMatrix(result, 1, wide);
    fillMatrixZero(result);
    for (int r = 0; r < rows; r++) {
        const netF* rRow = right->vals + r * right->width;
        for (int lc = 0; lc < left->width; lc++) {
            netF scale = left->vals[r * left->width + lc];
            netF* out = result->vals + lc * right->width;
            for (int rc = 0; rc < right->width; rc++) {
                out[rc] += scale * rRow[rc];
            }
        }
    }
    /* Normalize the accumulated sums into means. */
    for (int c = 0; c < wide; c++) {
        result->vals[c] /= rows;
    }
    return result;
}
/* Sum of element-wise products of two length-`count` vectors. */
static __inline netF dotProduct(const netF* left, const netF* right, const int count) {
    netF acc = 0;
    for (int n = 0; n < count; n++) {
        acc += left[n] * right[n];
    }
    return acc;
}
/* result[lr * rightHeight + rr] = dot(left row lr, right row rr), both rows
 * of length leftWidth; rows are distributed across OpenMP threads.
 * FIX: removed the unused local `netF sum = 0;` (dead code / compiler warning). */
static __inline void innerTransExpandMultCollapseMatrices(const int leftHeight, const int rightHeight,
    const int leftWidth, const netF* leftVals, const netF* rightVals, netF* result) {
    int leftRow;
    #pragma omp parallel for schedule(dynamic, 16)
    for (leftRow = 0; leftRow < leftHeight; leftRow++) {
        int rightRow;
        for (rightRow = 0; rightRow < rightHeight; rightRow++) {
            result[leftRow * rightHeight + rightRow] = dotProduct(
                leftVals + leftRow * leftWidth, rightVals + rightRow * leftWidth, leftWidth);
        }
    }
}
/* Flattened 1 x (leftHeight*rightHeight) result where each entry is the
 * dot product of a left row with a right row, divided by the shared width
 * (i.e. the mean of the element-wise products). Widths must match. */
matrix* transExpandMultCollapseMatrices(matrix* left, matrix *right, matrix* result) {
    if (left->width != right->width) {
        return NULL;
    }
    const int wide = left->width;
    const int cells = left->height * right->height;
    validateSuppliedMatrix(result, 1, cells);
    innerTransExpandMultCollapseMatrices(left->height, right->height,
        wide, left->vals, right->vals, result->vals);
    for (int c = 0; c < cells; c++) {
        result->vals[c] /= wide;
    }
    return result;
}
/* matVals = left * right^T where `right` is stored as rightHeight rows of
 * leftWidth values; left rows are distributed across OpenMP threads. */
static __inline void innerTransMultiplyMatrices(const int leftHeight, const int leftWidth, const int rightHeight,
    const netF* leftVals, const netF* rightVals, netF* matVals) {
    int r;
    #pragma omp parallel for schedule(dynamic, 16)
    for (r = 0; r < leftHeight; r++) {
        const netF* leftRow = leftVals + r * leftWidth;
        netF* outRow = matVals + r * rightHeight;
        for (int c = 0; c < rightHeight; c++) {
            outRow[c] = dotProduct(leftRow, rightVals + c * leftWidth, leftWidth);
        }
    }
}
/* result = left * right^T; the shared dimension is the (equal) widths.
 * Returns NULL when the widths differ. */
matrix* cpuTransMultiplyMatrices(matrix* left, matrix* right, matrix* result) {
    if (left->width != right->width) {
        return NULL;
    }
    validateSuppliedMatrix(result, left->height, right->height);
    innerTransMultiplyMatrices(left->height, left->width, right->height,
        left->vals, right->vals, result->vals);
    return result;
}
/* result = left - right, element-wise; NULL when the shapes differ. */
matrix* subtractMatrices(matrix* left, matrix* right, matrix* result) {
    if ((left->width != right->width) || (left->height != right->height)) {
        return NULL;
    }
    validateSuppliedMatrix(result, left->height, left->width);
    const int count = result->width * result->height;
    for (int cell = 0; cell < count; cell++) {
        result->vals[cell] = left->vals[cell] - right->vals[cell];
    }
    return result;
}
/* result = left + right, element-wise; NULL when the shapes differ. */
matrix* addMatrices(matrix* left, matrix* right, matrix* result) {
    if ((left->width != right->width) || (left->height != right->height)) {
        return NULL;
    }
    validateSuppliedMatrix(result, left->height, left->width);
    const int count = result->width * result->height;
    for (int cell = 0; cell < count; cell++) {
        result->vals[cell] = left->vals[cell] + right->vals[cell];
    }
    return result;
}
static __inline void innerTransposeMatrix(const netF* original, netF* result, const int origHigh,
    const int origWide) {
    /* Cache-blocked transpose: work in 32x32 tiles so both the reads and the
     * strided writes stay within a small working set for large matrices. */
    enum { TILE = 32 };
    for (int rowBase = 0; rowBase < origHigh; rowBase += TILE) {
        int rowEnd = rowBase + TILE;
        if (rowEnd > origHigh) rowEnd = origHigh;
        for (int colBase = 0; colBase < origWide; colBase += TILE) {
            int colEnd = colBase + TILE;
            if (colEnd > origWide) colEnd = origWide;
            for (int r = rowBase; r < rowEnd; r++) {
                for (int c = colBase; c < colEnd; c++) {
                    result[c * origHigh + r] = original[r * origWide + c];
                }
            }
        }
    }
}
/* Gather rows of `orig` into `result`: result row r <- orig row rowIdxs[r].
 * NOTE(review): result->width is used as the stride for BOTH matrices, so
 * this assumes orig->width == result->width — TODO confirm at call sites. */
matrix* copyMatrixRows(matrix* orig, matrix* result, int* rowIdxs) {
int wide = result->width;
for (int row = 0; row < result->height; row++) {
memcpy(result->vals + wide * row, orig->vals + wide * rowIdxs[row], sizeof(netF) * wide);
}
return result;
}
/* Write the transpose of `original` into `result`, which must already be
 * width x height (checked, aborts otherwise). */
matrix* transposeMatrix(matrix* original, matrix* result) {
validateSuppliedMatrix(result, original->width, original->height);
innerTransposeMatrix(original->vals, result->vals, original->height, original->width);
return result;
}
/* Transpose `original` in place, using `extraData` (caller-provided scratch
 * of at least width*height netFs) as the staging buffer, then swap the
 * stored dimensions. */
matrix* transposeMatrixSelf(matrix* original, netF* extraData) {
    innerTransposeMatrix(original->vals, extraData, original->height, original->width);
    setMatrixValues(original, extraData);
    int swapped = original->height;
    original->height = original->width;
    original->width = swapped;
    return original;
}
/* Sum of squares of all elements (squared Frobenius norm). */
netF sumSquareMatrix(matrix* mat) {
    const int count = mat->width * mat->height;
    netF total = 0;
    for (int cell = 0; cell < count; cell++) {
        netF v = mat->vals[cell];
        total += v * v;
    }
    return total;
}
/* Copy the contents of `original` into `result`, which must already have the
 * same shape (checked, aborts otherwise). Returns `result`. */
matrix* cloneMatrix(matrix* original, matrix* result) {
validateSuppliedMatrix(result, original->height, original->width);
return setMatrixValues(result, original->vals);
}
/* Horizontal concatenation: result = [left | right]. Heights must match;
 * returns NULL otherwise. (Name kept as-is for API compatibility —
 * "agument" is a historical typo for "augment".) */
matrix* agumentMatrix(matrix* left, matrix* right, matrix* result) {
    if (left->height != right->height) {
        return NULL;
    }
    validateSuppliedMatrix(result, left->height, left->width + right->width);
    for (int r = 0; r < left->height; r++) {
        int c;
        for (c = 0; c < left->width; c++) {
            setMatrixVal(result, r, c, *getMatrixVal(left, r, c));
        }
        for (c = 0; c < right->width; c++) {
            setMatrixVal(result, r, left->width + c, *getMatrixVal(right, r, c));
        }
    }
    return result;
}
/* Return 1 when `mat` is NULL or contains any NaN value, else 0. */
int doesMatrixHaveNANs(matrix* mat) {
    if (mat == NULL) {
        return 1;
    }
    const int count = mat->width * mat->height;
    for (int cell = 0; cell < count; cell++) {
        if (isnan(mat->vals[cell])) {
            return 1;
        }
    }
    return 0;
}
|
5125.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Array initialization: x holds multiples of pi, and A[i][j] is a
 * deterministic function of (i, j) scaled by nx. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
int i, j;
for (i = 0; i < ny; i++)
x[i] = i * M_PI;
for (i = 0; i < nx; i++)
for (j = 0; j < ny; j++)
A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.
Prints y to stderr, 20 values per line (note: the newline also fires at
i == 0, matching the standard Polybench layout). */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: y = A^T * (A * x), timed including call/return.
 * BUG FIX: in the second worksharing loop, different i-iterations (running on
 * different threads) accumulate into the same y[j]; the unsynchronized
 * read-modify-write was a data race. The update is now protected with
 * `#pragma omp atomic`. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;
#pragma scop
  #pragma omp parallel num_threads(2)
  {
    /* Zero the output vector. */
    #pragma omp for schedule(dynamic, 8)
    for (i = 0; i < _PB_NY; i++)
      y[i] = 0;
    /* tmp = A * x, then y += A^T row contributions. */
    #pragma omp for private (j) schedule(dynamic, 8)
    for (i = 0; i < _PB_NX; i++)
    {
      tmp[i] = 0;
      for (j = 0; j < _PB_NY; j++)
        tmp[i] = tmp[i] + A[i][j] * x[j];
      for (j = 0; j < _PB_NY; j++)
      {
        #pragma omp atomic
        y[j] = y[j] + A[i][j] * tmp[i];
      }
    }
  }
#pragma endscop
}
/* Driver: allocate, initialize, time the kernel, and print live-out data
 * through polybench_prevent_dce so the kernel cannot be optimized away. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
vec_add_for.c | /* for Directive Example */
#include <omp.h>
#define N 1000
#define CHUNKSIZE 100
/* Vector add demonstrating the OpenMP worksharing `for` directive with a
 * dynamic schedule and `nowait` (no barrier at the end of the loop).
 * BUG FIX: the original declared `main` with an implicit int return type,
 * which is invalid since C99, and fell off the end without returning. */
int main(int argc, char *argv[]) {
    int i, chunk;
    float a[N], b[N], c[N];
    (void)argc;
    (void)argv;
    /* Some initializations */
    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;
    chunk = CHUNKSIZE;
    #pragma omp parallel shared(a,b,c,chunk) private(i)
    {
        #pragma omp for schedule(dynamic,chunk) nowait
        for (i = 0; i < N; i++)
            c[i] = a[i] + b[i];
    } /* end of parallel region */
    return 0;
}
|
GB_unop__identity_uint8_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_int64)
// op(A') function: GB (_unop_tran__identity_uint8_int64)
// C type: uint8_t
// A type: int64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = (uint8_t) Ax [p] for all p in [0, anz): apply the IDENTITY unary
 * op with an int64_t -> uint8_t typecast. Cx and Ax may alias. When Ab is
 * non-NULL (bitmap case) only entries with Ab [p] set are written.
 * Returns GrB_NO_VALUE when this op/type combination is compiled out. */
GrB_Info GB (_unop_apply__identity_uint8_int64)
(
uint8_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/full case: every entry is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = identity((uint8_t) A'): transpose, typecast, and apply the unary op.
 * The actual work is in the shared template GB_unop_transpose.c, driven by
 * the GB_* macros defined above. */
GrB_Info GB (_unop_tran__identity_uint8_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | #include "heads.h"
/* Back-substitution for the (already eliminated) upper-triangular system:
 * walk the rows bottom-up, store each solution in answers[row][0], and
 * subtract the solved variable's contribution from the right-hand sides of
 * the rows above. With nthreads > 1 the inner update loop is parallelized. */
void calculate(int nthreads) {
    if (nthreads == 1) {
        for (int row = SIZE - 1; row >= 0; row--) {
            // case matrix[row][row] == 0 is already rejected while eliminating
            double solution = (double)vec[row][0] / matrix[row][row];
            answers[row][0] = solution;
            for (int above = row - 1; above >= 0; above--) {
                double factor = (double)matrix[above][row] / matrix[row][row];
                vec[above][0] -= vec[row][0] * factor;
            }
        }
    }
    else {
        for (int row = SIZE - 1; row >= 0; row--) {
            // case matrix[row][row] == 0 is already rejected while eliminating
            double solution = (double)vec[row][0] / matrix[row][row];
            answers[row][0] = solution;
            /* Iterations touch distinct vec rows, so they are independent. */
            #pragma omp parallel for num_threads(nthreads)
            for (int above = row - 1; above >= 0; above--) {
                double factor = (double)matrix[above][row] / matrix[row][row];
                vec[above][0] -= vec[row][0] * factor;
            }
        }
    }
}
/* Save a rownum x colnum array to `fname` as CSV: every value is written
 * with "%f" followed by ',', and each row ends with a newline.
 * 2d array visiting solution: https://stackoverflow.com/questions/16724368/how-to-pass-a-2d-array-by-pointer-in-c
 * BUG FIX: the original never checked fopen's result, so an unwritable
 * path caused fprintf(NULL, ...) — undefined behavior. */
void exporting(double* arr_2d, int rownum, int colnum, char* fname) {
    FILE* fp = fopen(fname, "w");
    if (fp == NULL) {
        fprintf(stderr, "exporting: cannot open %s for writing\n", fname);
        return;
    }
    double* p = (double*)arr_2d;
    for (int i = 0; i < rownum; i++) {
        for (int j = 0; j < colnum; j++) {
            fprintf(fp, "%f,", p[i * colnum + j]);
        }
        fprintf(fp, "\n");
    }
    fclose(fp);
}
/* Gaussian-elimination driver: generate a random system, pivot, eliminate,
 * back-substitute, and export every intermediate matrix/vector as CSV.
 * No arguments -> serial run; "-p <n>" -> n threads.
 * BUG FIXES:
 *  - printf("%s", argv[2][i]) passed a char where %s expects char* (UB);
 *    now %c.
 *  - printf("%i", strlen(...)) mismatched size_t; now %zu.
 *  - the digit check rejected '0' anywhere, so thread counts >= 10 could
 *    never be entered; '0' is now allowed except as the leading digit. */
int main(int argc, char* argv[]) {
    printf("Start Processing......\n");
    printf("Matrix size: %i * %i\n", SIZE, SIZE);
    start_t = clock();
    void *matrix_pointer = matrix;
    void *vector_pointer = vec;
    void *answer_pointer = answers;
    if (argc == 1) {
        /* Serial path: identical pipeline with nthreads == 1. */
        matrix_generator(1);
        exporting(matrix_pointer, SIZE, SIZE, "matrix.csv");
        vector_generator(1);
        exporting(vector_pointer, SIZE, 1, "vector.csv");
        for (int j = 0; j < SIZE; j++) {
            int r = find_maxrow(1, j);
            swap(r, j);
        }
        exporting(matrix_pointer, SIZE, SIZE, "converted_matrix.csv");
        exporting(vector_pointer, SIZE, 1, "converted_vector.csv");
        eliminate_all(1);
        exporting(matrix_pointer, SIZE, SIZE, "eliminated_matrix.csv");
        exporting(vector_pointer, SIZE, 1, "eliminated_vector.csv");
        calculate(1);
        exporting(answer_pointer, SIZE, 1, "answer.csv");
    }
    else {
        int nthreads = 0;
        if (argc == 3 && (argv[1][0] == '-' && argv[1][1] == 'p')){
            /* Validate that argv[2] is a positive decimal integer. */
            for(size_t i = 0; i < strlen(argv[2]); i++){
                char digit = argv[2][i];
                if (digit < '0' || digit > '9' || (i == 0 && digit == '0')) {
                    printf("Invalid argument!\n");
                    printf("%c\n", digit);
                    printf("%zu\n", strlen(argv[2]));
                    exit(EXIT_FAILURE);
                }
            }
            nthreads = atoi(argv[2]);
            if(nthreads == 0){
                printf("Invalid argument!\n");
                exit(EXIT_FAILURE);
            }
            printf("You are using %i threads\n", nthreads);
        }
        else{
            printf("Invalid argument!\n");
            exit(EXIT_FAILURE);
        }
        matrix_generator(nthreads);
        exporting(matrix_pointer, SIZE, SIZE, "matrix.csv");
        vector_generator(nthreads);
        exporting(vector_pointer, SIZE, 1, "vector.csv");
        /* Partial pivoting: move the max-magnitude row into the pivot slot. */
        for (int j = 0; j < SIZE; j++) {
            int r = find_maxrow(nthreads, j);
            swap(r, j);
        }
        exporting(matrix_pointer, SIZE, SIZE, "converted_matrix.csv");
        exporting(vector_pointer, SIZE, 1, "converted_vector.csv");
        eliminate_all(nthreads);
        exporting(matrix_pointer, SIZE, SIZE, "eliminated_matrix.csv");
        exporting(vector_pointer, SIZE, 1, "eliminated_vector.csv");
        calculate(nthreads);
        exporting(answer_pointer, SIZE, 1, "answer.csv");
    }
    finish_t = clock();
    total_t = (double)(finish_t - start_t) / CLOCKS_PER_SEC;
    printf("Spent time:%f \n",total_t);
    printf("Finished, please check answer.csv to see what the answer is\n");
    return 0;
}
9586.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose
/* FDTD-2D time-stepping kernel (auto-generated, tiled by CHILL: 8-wide tiles
 * over the row dimension, 64-wide tiles over the column dimension).
 * Per time step: refresh the ey boundary from _fict_, then three parallel
 * sweeps update ey, ex, and hz from their neighbors. Left byte-identical —
 * the tile-bound min/max expressions are too order-sensitive to restyle. */
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[2000 + 0][2600 + 0], double ey[2000 + 0][2600 + 0], double hz[2000 + 0][2600 + 0], double _fict_[1000 + 0]) {
int t10;
int t8;
int t6;
int t4;
int t2;
for (t2 = 0; t2 <= tmax - 1; t2 += 1) {
// boundary row of ey comes from the source term _fict_ at this time step
for (t4 = 0; t4 <= ny - 1; t4 += 1)
ey[0][t4] = _fict_[t2];
// ey update: depends on hz of the row above
#pragma omp parallel for private(t4,t6,t8,t10)
for (t4 = 1; t4 <= nx - 1; t4 += 8)
for (t6 = t4; t6 <= (t4 + 7 < nx - 1 ? t4 + 7 : nx - 1); t6 += 1)
for (t8 = 0; t8 <= ny - 1; t8 += 64)
for (t10 = t8; t10 <= (ny - 1 < t8 + 63 ? ny - 1 : t8 + 63); t10 += 1)
ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]);
// ex update: depends on hz of the column to the left
#pragma omp parallel for private(t4,t6,t8,t10)
for (t4 = 0; t4 <= nx - 1; t4 += 8)
for (t6 = t4; t6 <= (t4 + 7 < nx - 1 ? t4 + 7 : nx - 1); t6 += 1)
for (t8 = 1; t8 <= ny - 1; t8 += 64)
for (t10 = t8; t10 <= (ny - 1 < t8 + 63 ? ny - 1 : t8 + 63); t10 += 1)
ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]);
// hz update: curl of ex/ey
#pragma omp parallel for private(t4,t6,t8,t10)
for (t4 = 0; t4 <= nx - 2; t4 += 8)
for (t6 = t4; t6 <= (t4 + 7 < nx - 2 ? t4 + 7 : nx - 2); t6 += 1)
for (t8 = 0; t8 <= ny - 2; t8 += 64)
for (t10 = t8; t10 <= (ny - 2 < t8 + 63 ? ny - 2 : t8 + 63); t10 += 1)
hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]);
}
}
|
GB_unaryop__minv_fp32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_uint16
// op(A') function: GB_tran__minv_fp32_uint16
// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = 1.0F / (float) Ax [p] for all p in [0, anz): apply the MINV
 * (multiplicative inverse) unary op with a uint16_t -> float typecast,
 * statically scheduled across nthreads OpenMP threads. Compiles to a stub
 * returning GrB_NO_VALUE when this op/type combination is disabled. */
GrB_Info GB_unop__minv_fp32_uint16
(
float *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = minv((float) A'): transpose, typecast, and apply the unary op.
 * The real work is in the shared template GB_unaryop_transpose.c (phase 2
 * of the two-phase transpose), driven by the GB_* macros defined above. */
GrB_Info GB_tran__minv_fp32_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
single.c | /* Copyright (C) 2005-2014 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU OpenMP Library (libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Copyright 2014 DEI - Universita' di Bologna
author DEI - Universita' di Bologna
Alessandro Capotondi - alessandro.capotondi@unibo.it
info #pragma omp single implementation */
#include "libgomp.h"
#define WS_NOT_INITED ( 0x0U )
/* Internal hook for `#pragma omp single copyprivate`: not supported by this
 * runtime port — warn and return no broadcast buffer. */
void *
gomp_single_copy_start(gomp_work_share_t *ws)
{
GOMP_WARN_NOT_SUPPORTED("#pragma omp single copyprivate");
return NULL;
}
/* Internal hook for the end of `single copyprivate`: unsupported, no-op. */
void
gomp_single_copy_end(gomp_work_share_t *ws, void *data)
{
GOMP_WARN_NOT_SUPPORTED("#pragma omp single copyprivate");
}
/**************** APIs **************************/
/* Entry point for `#pragma omp single`: returns nonzero for the one thread
 * that should execute the single region, zero for the rest. The team's
 * work share arbitrates which thread wins; the last thread through resets
 * the work share for reuse. */
int
GOMP_single_start(void)
{
int ret = 0;
uint32_t pid = get_proc_id();
gomp_team_t *team = (gomp_team_t *) CURR_TEAM(pid);
gomp_work_share_t *ws;
//NOTE ws return from this function already locked
ret = gomp_work_share_start(&ws);
/* Faster than a call to gomp_work_share_end_nowait() */
ws->completed++;
/* Last arriving thread: mark the work share uninitialized so it can be
 * reused, and (when nowait is supported) free the previous one. */
if (ws->completed == team->nthreads)
{
ws->checkfirst = WS_NOT_INITED;
#ifdef OMP_NOWAIT_SUPPORT
gomp_free_work_share(ws->prev_ws);
#endif
}
gomp_hal_unlock(&ws->lock);
return ret;
}
/* Public API stub for `single copyprivate`: unsupported; returns NULL so
 * callers get no broadcast buffer. */
void *
GOMP_single_copy_start(void)
{
GOMP_WARN_NOT_SUPPORTED("#pragma omp single copyprivate");
return NULL;
}
/* Public API stub for the end of `single copyprivate`: unsupported, no-op. */
void
GOMP_single_copy_end(void *data)
{
GOMP_WARN_NOT_SUPPORTED("#pragma omp single copyprivate");
return;
}
|
omp_num_threads.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "omp_testsuite.h"
/* Verify that num_threads(k) yields exactly k threads for every k from 1 up
 * to the runtime's default team size. Returns 1 on success, 0 on failure.
 * (logFile is unused; kept for the test-suite's common signature.) */
int
check_omp_num_threads (FILE * logFile)
{
    int errors = 0;
    int available = 0;
    int requested;
    int counted;
    /* Discover how many threads a plain parallel region gets. */
    #pragma omp parallel
    {
        #pragma omp master
        available = omp_get_num_threads ();
    }
    /* Request every team size up to that maximum and verify that (a) each
     * thread sees the requested size and (b) exactly that many threads ran. */
    for (requested = 1; requested <= available; requested++)
    {
        counted = 0;
        #pragma omp parallel num_threads(requested) reduction(+:errors)
        {
            errors = errors + !(requested == omp_get_num_threads ());
            #pragma omp atomic
            counted += 1;
        }
        errors = errors + !(counted == requested);
    }
    return !errors;
}
/* Cross-check variant: identical to check_omp_num_threads except the
 * num_threads(threads) clause is DELIBERATELY omitted from the parallel
 * region, so the team size does not track `threads` and the checks are
 * expected to fail — this confirms the check is actually sensitive to the
 * clause. Do not "fix" the missing clause. */
int
crosscheck_omp_num_threads (FILE * logFile)
{
int failed = 0;
int max_threads = 0;
int threads;
int nthreads;
/* first we check how many threads are available */
#pragma omp parallel
{
#pragma omp master
max_threads = omp_get_num_threads ();
}
/* we increase the number of threads from one to maximum: */
for (threads = 1; threads <= max_threads; threads++)
{
nthreads = 0;
/* no num_threads clause here, on purpose */
#pragma omp parallel reduction(+:failed)
{
failed = failed + !(threads == omp_get_num_threads ());
#pragma omp atomic
nthreads += 1;
}
failed = failed + !(nthreads == threads);
}
return !failed;
}
|
target-17.c | extern void abort (void);
/* Exercise OpenMP target data mapping of a VLA `a` in three ways:
 *  1) explicit map(to:a)      — device must see the host values 5*i;
 *  2) no map clause for `a`   — relies on implicit mapping of the VLA
 *     (NOTE(review): presumably implicit tofrom — confirm against the
 *     OpenMP version this test targets);
 *  3) firstprivate(a)         — device gets an initialized private copy (7*i).
 * Calls abort() on the first mismatch. */
void
foo (int n)
{
int a[n], i, err;
for (i = 0; i < n; i++)
a[i] = 5 * i;
#pragma omp target map(to:a) map(from:err) private(i)
{
err = 0;
for (i = 0; i < n; i++)
if (a[i] != 5 * i)
err = 1;
}
if (err)
abort ();
for (i = 0; i < n; i++)
a[i] += i;
#pragma omp target map(from:err) private(i)
{
err = 0;
for (i = 0; i < n; i++)
if (a[i] != 6 * i)
err = 1;
}
if (err)
abort ();
for (i = 0; i < n; i++)
a[i] += i;
#pragma omp target firstprivate (a) map(from:err) private(i)
{
err = 0;
for (i = 0; i < n; i++)
if (a[i] != 7 * i)
err = 1;
}
if (err)
abort ();
}
/* Run the mapping checks with n == 9 (so `a` in foo is a true VLA). */
int
main ()
{
foo (9);
return 0;
}
|
pragma_omp.h | /******************************************************************************
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include <thrust/detail/config.h>
#if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
// MSVC ICEs when using the standard C++11 `_Pragma` operator with OpenMP
// directives.
// WAR this by using the MSVC-extension `__pragma`. See this link for more info:
// https://developercommunity.visualstudio.com/t/Using-C11s-_Pragma-with-OpenMP-dire/1590628
#define THRUST_PRAGMA_OMP_IMPL(directive) __pragma(directive)
#else // Not MSVC:
#define THRUST_PRAGMA_OMP_IMPL(directive) _Pragma(#directive)
#endif
// For internal use only -- THRUST_PRAGMA_OMP is used to switch between
// different flavors of openmp pragmas. Pragmas are not emitted when OpenMP is
// not available.
//
// Usage:
// Replace: #pragma omp parallel for
// With : THRUST_PRAGMA_OMP(parallel for)
//
#if defined(_NVHPC_STDPAR_OPENMP) && _NVHPC_STDPAR_OPENMP == 1
#define THRUST_PRAGMA_OMP(directive) THRUST_PRAGMA_OMP_IMPL(omp_stdpar directive)
#elif defined(_OPENMP)
#define THRUST_PRAGMA_OMP(directive) THRUST_PRAGMA_OMP_IMPL(omp directive)
#else
#define THRUST_PRAGMA_OMP(directive)
#endif
|
nvptx_va_arg_delayed_diags.c | // RUN: %clang_cc1 -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only
// RUN: %clang_cc1 -verify -DDIAGS -DIMMEDIATE -fopenmp -x c -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only
// RUN: %clang_cc1 -verify -DDIAGS -DDELAYED -fopenmp -x c -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
#ifndef DIAGS
// expected-no-diagnostics
#endif // DIAGS
// Variadic function: compiled for the device only when marked via
// `declare target to(foo)` (IMMEDIATE mode), in which case va_arg is
// diagnosed. NOTE: the expected-error markers below use RELATIVE offsets
// (@+4), so no lines may be inserted between a marker and its target line.
void foo(int r, ...) {
#ifdef IMMEDIATE
// expected-error@+4 {{CUDA device code does not support va_arg}}
#endif // IMMEDIATE
__builtin_va_list list;
__builtin_va_start(list, r);
(void)__builtin_va_arg(list, int);
__builtin_va_end(list);
}
#ifdef IMMEDIATE
#pragma omp declare target to(foo)
#endif //IMMEDIATE
#ifdef IMMEDIATE
#pragma omp declare target
#endif //IMMEDIATE
// Device-compiled in both modes: enclosed in a declare-target region in
// IMMEDIATE mode, reached from a target region in DELAYED mode. The va_arg
// diagnostic fires whenever DIAGS is defined. Relative-offset markers (@+4):
// do not insert lines between a marker and its target.
void t1(int r, ...) {
#ifdef DIAGS
// expected-error@+4 {{CUDA device code does not support va_arg}}
#endif // DIAGS
__builtin_va_list list;
__builtin_va_start(list, r);
(void)__builtin_va_arg(list, int);
__builtin_va_end(list);
}
#ifdef IMMEDIATE
#pragma omp end declare target
#endif //IMMEDIATE
// In DELAYED mode the call to t1 sits inside a target region, so the device
// va_arg error is emitted lazily, with a "called by 'main'" note attached.
int main() {
#ifdef DELAYED
#pragma omp target
#endif // DELAYED
{
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
t1(0);
}
return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for `struct timeval` values.
 * Y is used as scratch: it is normalized in place so the final tv_usec
 * difference lands in [0, 1000000).
 * Returns 1 when the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so x->tv_usec - y->tv_usec cannot underflow. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry whole seconds the other way so the microsecond part stays small. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is certainly positive here. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1, 3D 7-point variable-coefficient stencil.
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; 2 halo layers are added.
 * Runs the kernel TESTS times and reports the best wall-clock time. */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Defaults (32^3 interior + halo, 10 time steps) are used when arguments
     * are missing; the original read these variables uninitialized then (UB). */
    int Nx = 34, Ny = 34, Nz = 34;
    int Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 2;
        Ny = atoi(argv[2]) + 2;
        Nz = atoi(argv[3]) + 2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* Allocate the two time planes A[0], A[1] as Nz x Ny x Nx ragged arrays. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    /* One coefficient array per point of the 7-point stencil. */
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for (m = 0; m < 7; m++) {
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    /* Tile size information, including extra element to decide the list length.
     * The list is modified here before source-to-source transformations. */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 4;
    tile_size[3] = 64;
    tile_size[4] = -1;
    /* For timekeeping. */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;
    /* Initialize with reproducible pseudo-random values.  Loops start at 0
     * (the original started at 1) so the halo layer at index 0 is defined:
     * the stencil reads indices i-1/j-1/k-1 == 0.  Plane A[1] is seeded with
     * the same values because its halo is read from t = 1 onward but is
     * never written by the kernel. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = A[0][i][j][k];
            }
        }
    }
    for (m = 0; m < 7; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* Run the kernel TESTS times, keeping the fastest time. */
    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* Bug fix: the macro defined above is MIN; lowercase min() is
         * undeclared and failed to compile/link. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    /* Free all allocations; the original leaked A, coef and tile_size. */
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(A[m][i][j]);
            }
            free(A[m][i]);
        }
        free(A[m]);
    }
    free(A);
    for (m = 0; m < 7; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    (void)ts_return;     /* kept for parity with other PLUTO drivers */
    (void)num_threads;   /* may be consumed by PRINT_RESULTS */
    return 0;
}
|
IntegratorHPMCMonoImplicit.h | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#ifndef __HPMC_MONO_IMPLICIT__H__
#define __HPMC_MONO_IMPLICIT__H__
#include "IntegratorHPMCMono.h"
#include "hoomd/Autotuner.h"
#include <random>
#include <cfloat>
#ifdef _OPENMP
#include <omp.h>
#endif
/*! \file IntegratorHPMCMonoImplicit.h
\brief Defines the template class for HPMC with implicit generated depletant solvent
\note This header cannot be compiled by nvcc
*/
#ifdef NVCC
#error This header cannot be compiled by nvcc
#endif
#include <hoomd/extern/pybind/include/pybind11/pybind11.h>
namespace hpmc
{
//! Template class for HPMC update with implicit depletants
/*!
Depletants are generated randomly on the fly according to the semi-grand canonical ensemble.
The penetrable depletants model is simulated.
\ingroup hpmc_integrators
*/
template< class Shape >
class IntegratorHPMCMonoImplicit : public IntegratorHPMCMono<Shape>
    {
    public:
        //! Construct the integrator
        //! \param sysdef System definition
        //! \param seed Random number generator seed
        IntegratorHPMCMonoImplicit(std::shared_ptr<SystemDefinition> sysdef,
                              unsigned int seed);
        //! Destructor
        virtual ~IntegratorHPMCMonoImplicit();
        //! Set the depletant density in the free volume
        //! Forces the per-type Poisson parameters to be rebuilt on the next update().
        void setDepletantDensity(Scalar n_R)
            {
            m_n_R = n_R;
            m_need_initialize_poisson = true;
            }
        //! Set the type of depletant particle
        void setDepletantType(unsigned int type)
            {
            m_type = type;
            }
        //! Number of depletant-reinsertions
        /*! \param n_trial Depletant reinsertions per overlapping depletant
        */
        void setNTrial(unsigned int n_trial)
            {
            m_n_trial = n_trial;
            }
        //! Return number of depletant re-insertions
        unsigned int getNTrial()
            {
            return m_n_trial;
            }
        //! Returns the depletant density
        Scalar getDepletantDensity()
            {
            return m_n_R;
            }
        //! Return the depletant type
        unsigned int getDepletantType()
            {
            return m_type;
            }
        //! Return the number of re-insertion trials
        // NOTE(review): duplicates getNTrial() (kept for API compatibility)
        unsigned int getNumTrials() const
            {
            return m_n_trial;
            }
        //! Reset statistics counters
        // Snapshots the implicit counters so per-run statistics can be computed.
        virtual void resetStats()
            {
            IntegratorHPMCMono<Shape>::resetStats();
            ArrayHandle<hpmc_implicit_counters_t> h_counters(m_implicit_count, access_location::host, access_mode::read);
            m_implicit_count_run_start = h_counters.data[0];
            }
        //! Print statistics about the hpmc steps taken
        virtual void printStats()
            {
            IntegratorHPMCMono<Shape>::printStats();
            hpmc_implicit_counters_t result = getImplicitCounters(1);
            // elapsed wall time in seconds (clock reports nanoseconds)
            double cur_time = double(this->m_clock.getTime()) / Scalar(1e9);
            this->m_exec_conf->msg->notice(2) << "-- Implicit depletants stats:" << "\n";
            this->m_exec_conf->msg->notice(2) << "Depletant insertions per second: "
                << double(result.insert_count)/cur_time << "\n";
            this->m_exec_conf->msg->notice(2) << "Configurational bias attempts per second: "
                << double(result.reinsert_count)/cur_time << "\n";
            this->m_exec_conf->msg->notice(2) << "Fraction of depletants in free volume: "
                << result.getFreeVolumeFraction() << "\n";
            this->m_exec_conf->msg->notice(2) << "Fraction of overlapping depletants: "
                << result.getOverlapFraction()<< "\n";
            }
        //! Get the current counter values
        hpmc_implicit_counters_t getImplicitCounters(unsigned int mode=0);
        /* \returns a list of provided quantities
        */
        std::vector< std::string > getProvidedLogQuantities()
            {
            // start with the integrator provided quantities
            std::vector< std::string > result = IntegratorHPMCMono<Shape>::getProvidedLogQuantities();
            // then add ours
            result.push_back("hpmc_fugacity");
            result.push_back("hpmc_ntrial");
            result.push_back("hpmc_insert_count");
            result.push_back("hpmc_reinsert_count");
            result.push_back("hpmc_free_volume_fraction");
            result.push_back("hpmc_overlap_fraction");
            result.push_back("hpmc_configurational_bias_ratio");
            return result;
            }
        //! Get the value of a logged quantity
        virtual Scalar getLogValue(const std::string& quantity, unsigned int timestep);
        //! Method to scale the box
        virtual bool attemptBoxResize(unsigned int timestep, const BoxDim& new_box);
        //! Slot to be called when number of types changes
        void slotNumTypesChange();
    protected:
        Scalar m_n_R;                                            //!< Average depletant number density in free volume
        unsigned int m_type;                                     //!< Type of depletant particle to generate
        GPUArray<hpmc_implicit_counters_t> m_implicit_count;     //!< Counter of active cell cluster moves
        hpmc_implicit_counters_t m_implicit_count_run_start;     //!< Counter of active cell cluster moves at run start
        hpmc_implicit_counters_t m_implicit_count_step_start;    //!< Counter of active cell cluster moves at step start
        std::vector<std::poisson_distribution<unsigned int> > m_poisson;   //!< Poisson distribution
        std::vector<Scalar> m_lambda;                            //!< Poisson distribution parameters per type
        Scalar m_d_dep;                                          //!< Depletant circumsphere diameter
        GPUArray<Scalar> m_d_min;                                //!< Minimum sphere from which test depletant is excluded
        GPUArray<Scalar> m_d_max;                                //!< Maximum sphere for test depletant insertion
        std::vector<hoomd::detail::Saru> m_rng_depletant;        //!< RNGs for depletant insertion (one per OpenMP thread)
        bool m_rng_initialized;                                  //!< True if RNGs have been initialized
        unsigned int m_n_trial;                                  //!< Number of trial re-insertions per depletant
        bool m_need_initialize_poisson;                          //!< Flag to tell if we need to initialize the poisson distribution
        //! Take one timestep forward
        virtual void update(unsigned int timestep);
        //! Initalize Poisson distribution parameters
        virtual void updatePoissonParameters();
        //! Initialize the Poisson distributions
        virtual void initializePoissonDistribution();
        //! Set the nominal width appropriate for depletion interaction
        virtual void updateCellWidth();
        //! Generate a random depletant position in a sphere around a particle
        template<class RNG>
        inline void generateDepletant(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta, Scalar d_min,
            vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletants);
        /*! Generate a random depletant position in a region including the sphere around a particle,
            restricted so that it does not intersect another sphere
         */
        template<class RNG>
        inline void generateDepletantRestricted(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta, Scalar delta_other,
            vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletants,
            vec3<Scalar> pos_sphere_other);
        //! Try inserting a depletant in a configuration such that it overlaps with the particle in the old (new) configuration
        inline bool insertDepletant(vec3<Scalar>& pos_depletant, const Shape& shape_depletant, unsigned int idx,
            typename Shape::param_type *params, unsigned int *h_overlaps, unsigned int typ_i, Scalar4 *h_postype, Scalar4 *h_orientation,
            vec3<Scalar> pos_new, quat<Scalar>& orientation_new, const typename Shape::param_type& params_new,
            unsigned int &overlap_checks, unsigned int &overlap_err_count, bool &overlap_shape, bool new_config);
    };
/*! \param sysdef System definition
    \param seed Random number generator seed
    NOTE: only 3d supported at this time
*/
template< class Shape >
IntegratorHPMCMonoImplicit< Shape >::IntegratorHPMCMonoImplicit(std::shared_ptr<SystemDefinition> sysdef,
        unsigned int seed)
    : IntegratorHPMCMono<Shape>(sysdef, seed), m_n_R(0), m_type(0), m_d_dep(0.0), m_rng_initialized(false), m_n_trial(0),
      m_need_initialize_poisson(true)
    {
    this->m_exec_conf->msg->notice(5) << "Constructing IntegratorHPMCImplicit" << std::endl;
    // allocate counter storage and per-type insertion sphere bounds, handing
    // ownership to the members via swap
    GPUArray<hpmc_implicit_counters_t> counts(1,this->m_exec_conf);
    m_implicit_count.swap(counts);
    const unsigned int ntypes = this->m_pdata->getNTypes();
    GPUArray<Scalar> min_buf(ntypes, this->m_exec_conf);
    m_d_min.swap(min_buf);
    GPUArray<Scalar> max_buf(ntypes, this->m_exec_conf);
    m_d_max.swap(max_buf);
    // FLT_MAX marks "not yet computed" Poisson means
    m_lambda.resize(ntypes,FLT_MAX);
    }
//! Destructor
template< class Shape >
IntegratorHPMCMonoImplicit< Shape >::~IntegratorHPMCMonoImplicit()
    {
    // nothing to release explicitly: GPUArray/vector members clean up themselves
    }
//! React to a change in the number of particle types: resize all per-type
//! storage and schedule a rebuild of the Poisson parameters.
template <class Shape>
void IntegratorHPMCMonoImplicit<Shape>::slotNumTypesChange()
    {
    // let the base class resize its own per-type data first
    IntegratorHPMCMono<Shape>::slotNumTypesChange();
    const unsigned int ntypes = this->m_pdata->getNTypes();
    m_lambda.resize(ntypes,FLT_MAX);
    GPUArray<Scalar> min_buf(ntypes, this->m_exec_conf);
    m_d_min.swap(min_buf);
    GPUArray<Scalar> max_buf(ntypes, this->m_exec_conf);
    m_d_max.swap(max_buf);
    m_need_initialize_poisson = true;
    }
//! Recompute the per-type insertion sphere bounds and Poisson means from the
//! current depletant type/density.
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::updatePoissonParameters()
    {
    // cache the depletant circumsphere diameter
    Shape depletant(quat<Scalar>(), this->m_params[this->m_type]);
    m_d_dep = depletant.getCircumsphereDiameter();
    // per-type bounds of the depletant sampling shell
    ArrayHandle<Scalar> h_d_min(m_d_min, access_location::host, access_mode::overwrite);
    ArrayHandle<Scalar> h_d_max(m_d_max, access_location::host, access_mode::overwrite);
    const unsigned int ntypes = this->m_pdata->getNTypes();
    for (unsigned int type_idx = 0; type_idx < ntypes; ++type_idx)
        {
        Shape shape(quat<Scalar>(), this->m_params[type_idx]);
        // insertion sphere: colloid circumsphere inflated by the depletant diameter
        Scalar big_d = shape.getCircumsphereDiameter()+m_d_dep;
        h_d_max.data[type_idx] = big_d;
        // volume of the insertion sphere
        Scalar vol = Scalar(M_PI/6.0)*big_d*big_d*big_d;
        // inner exclusion sphere is currently disabled (diameter zero)
        Scalar small_d = Scalar(0.0);
        h_d_min.data[type_idx] = small_d;
        // subtract the inner sphere from the sampling volume
        vol -= Scalar(M_PI/6.0)*small_d*small_d*small_d;
        // expected number of depletants in the sampling volume
        m_lambda[type_idx] = this->m_n_R*vol;
        }
    }
//! Build one std::poisson_distribution per particle type from m_lambda.
template<class Shape>
void IntegratorHPMCMonoImplicit< Shape >::initializePoissonDistribution()
    {
    const unsigned int ntypes = this->m_pdata->getNTypes();
    m_poisson.resize(ntypes);
    for (unsigned int type_idx = 0; type_idx < ntypes; ++type_idx)
        {
        const Scalar mean = m_lambda[type_idx];
        // skip non-positive means (invalid distribution parameters); the
        // slot keeps its default-constructed state
        if (mean <= Scalar(0.0))
            continue;
        m_poisson[type_idx] = std::poisson_distribution<unsigned int>(mean);
        }
    }
//! Widen the nominal cell width to cover the depletion interaction range.
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::updateCellWidth()
    {
    this->m_nominal_width = this->getMaxDiameter();
    // with depletants present, interactions extend one depletant diameter further
    if (m_n_R > Scalar(0.0))
        {
        Shape depletant(quat<Scalar>(), this->m_params[m_type]);
        this->m_nominal_width += depletant.getCircumsphereDiameter();
        }
    this->m_exec_conf->msg->notice(5) << "IntegratorHPMCMonoImplicit: updating nominal width to " << this->m_nominal_width << std::endl;
    }
//! Take one MC sweep with implicit-depletant acceptance.
/*! \param timestep Current time step
    For each selected particle: propose a translate/rotate move, reject on
    colloid-colloid overlap, then sample depletants in a sphere around the
    particle and apply the implicit-depletant acceptance rule (with optional
    configurational-bias re-insertions when m_n_trial > 0).
    NOTE(review): the RNG draw order (rng_poisson, rng_i, per-thread depletant
    RNGs) determines trajectory reproducibility -- do not reorder calls.
*/
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::update(unsigned int timestep)
    {
    this->m_exec_conf->msg->notice(10) << "HPMCMonoImplicit update: " << timestep << std::endl;
    IntegratorHPMC::update(timestep);
    // update poisson distributions (lazily, only after density/type changes)
    if (m_need_initialize_poisson)
        {
        updatePoissonParameters();
        initializePoissonDistribution();
        m_need_initialize_poisson = false;
        }
    // lazily initialize one depletant RNG per OpenMP thread
    if (!m_rng_initialized)
        {
        unsigned int n_omp_threads = 1;
        #ifdef _OPENMP
        n_omp_threads = omp_get_max_threads();
        #endif
        // initialize a set of random number generators
        for (unsigned int i = 0; i < n_omp_threads; ++i)
            {
            m_rng_depletant.push_back(hoomd::detail::Saru(timestep,this->m_seed+this->m_exec_conf->getRank(), i));
            }
        m_rng_initialized = true;
        }
    // get needed vars
    ArrayHandle<hpmc_counters_t> h_counters(this->m_count_total, access_location::host, access_mode::readwrite);
    hpmc_counters_t& counters = h_counters.data[0];
    ArrayHandle<hpmc_implicit_counters_t> h_implicit_counters(m_implicit_count, access_location::host, access_mode::readwrite);
    hpmc_implicit_counters_t& implicit_counters = h_implicit_counters.data[0];
    m_implicit_count_step_start = implicit_counters;
    const BoxDim& box = this->m_pdata->getBox();
    unsigned int ndim = this->m_sysdef->getNDimensions();
    #ifdef ENABLE_MPI
    // compute the width of the active region
    Scalar3 npd = box.getNearestPlaneDistance();
    Scalar3 ghost_fraction = this->m_nominal_width / npd;
    #endif
    // Shuffle the order of particles for this step
    this->m_update_order.resize(this->m_pdata->getN());
    this->m_update_order.shuffle(timestep);
    // update the AABB Tree
    this->buildAABBTree();
    // limit m_d entries so that particles cannot possibly wander more than one box image in one time step
    this->limitMoveDistances();
    // update the image list
    this->updateImageList();
    // combine the three seeds
    std::vector<unsigned int> seed_seq(3);
    seed_seq[0] = this->m_seed;
    seed_seq[1] = timestep;
    seed_seq[2] = this->m_exec_conf->getRank();
    std::seed_seq seed(seed_seq.begin(), seed_seq.end());
    // RNG for poisson distribution
    std::mt19937 rng_poisson(seed);
    if (this->m_prof) this->m_prof->push(this->m_exec_conf, "HPMC implicit");
    // access depletant insertion sphere dimensions
    ArrayHandle<Scalar> h_d_min(m_d_min, access_location::host, access_mode::read);
    ArrayHandle<Scalar> h_d_max(m_d_max, access_location::host, access_mode::read);
    // loop over local particles nselect times
    for (unsigned int i_nselect = 0; i_nselect < this->m_nselect; i_nselect++)
        {
        // access particle data and system box
        ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
        ArrayHandle<Scalar4> h_orientation(this->m_pdata->getOrientationArray(), access_location::host, access_mode::readwrite);
        // access interaction matrix
        ArrayHandle<unsigned int> h_overlaps(this->m_overlaps, access_location::host, access_mode::read);
        //access move sizes
        ArrayHandle<Scalar> h_d(this->m_d, access_location::host, access_mode::read);
        ArrayHandle<Scalar> h_a(this->m_a, access_location::host, access_mode::read);
        // loop through N particles in a shuffled order
        for (unsigned int cur_particle = 0; cur_particle < this->m_pdata->getN(); cur_particle++)
            {
            unsigned int i = this->m_update_order[cur_particle];
            // read in the current position and orientation
            Scalar4 postype_i = h_postype.data[i];
            Scalar4 orientation_i = h_orientation.data[i];
            vec3<Scalar> pos_i = vec3<Scalar>(postype_i);
            #ifdef ENABLE_MPI
            if (this->m_comm)
                {
                // only move particle if active
                if (!isActive(make_scalar3(postype_i.x, postype_i.y, postype_i.z), box, ghost_fraction))
                    continue;
                }
            #endif
            // make a trial move for i
            hoomd::detail::Saru rng_i(i, this->m_seed + this->m_exec_conf->getRank()*this->m_nselect + i_nselect, timestep);
            int typ_i = __scalar_as_int(postype_i.w);
            Shape shape_i(quat<Scalar>(orientation_i), this->m_params[typ_i]);
            unsigned int move_type_select = rng_i.u32() & 0xffff;
            bool move_type_translate = !shape_i.hasOrientation() || (move_type_select < this->m_move_ratio);
            if (move_type_translate)
                {
                move_translate(pos_i, rng_i, h_d.data[typ_i], ndim);
                #ifdef ENABLE_MPI
                if (this->m_comm)
                    {
                    // check if particle has moved into the ghost layer, and skip if it is
                    if (!isActive(vec_to_scalar3(pos_i), box, ghost_fraction))
                        continue;
                    }
                #endif
                }
            else
                {
                move_rotate(shape_i.orientation, rng_i, h_a.data[typ_i], ndim);
                }
            // check for overlaps with neighboring particle's positions
            bool overlap=false;
            detail::AABB aabb_i_local = shape_i.getAABB(vec3<Scalar>(0,0,0));
            // All image boxes (including the primary)
            const unsigned int n_images = this->m_image_list.size();
            for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                {
                vec3<Scalar> pos_i_image = pos_i + this->m_image_list[cur_image];
                detail::AABB aabb = aabb_i_local;
                aabb.translate(pos_i_image);
                // stackless search
                for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
                    {
                    if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
                        {
                        if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
                            {
                            for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
                                {
                                // read in its position and orientation
                                unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);
                                Scalar4 postype_j;
                                Scalar4 orientation_j;
                                // handle j==i situations
                                if ( j != i )
                                    {
                                    // load the position and orientation of the j particle
                                    postype_j = h_postype.data[j];
                                    orientation_j = h_orientation.data[j];
                                    }
                                else
                                    {
                                    if (cur_image == 0)
                                        {
                                        // in the first image, skip i == j
                                        continue;
                                        }
                                    else
                                        {
                                        // If this is particle i and we are in an outside image, use the translated position and orientation
                                        postype_j = make_scalar4(pos_i.x, pos_i.y, pos_i.z, postype_i.w);
                                        orientation_j = quat_to_scalar4(shape_i.orientation);
                                        }
                                    }
                                // put particles in coordinate system of particle i
                                vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_i_image;
                                unsigned int typ_j = __scalar_as_int(postype_j.w);
                                Shape shape_j(quat<Scalar>(orientation_j), this->m_params[typ_j]);
                                counters.overlap_checks++;
                                // check circumsphere overlap (cheap rejection before the exact test)
                                OverlapReal rsq = dot(r_ij,r_ij);
                                OverlapReal DaDb = shape_i.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
                                bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
                                if (h_overlaps.data[this->m_overlap_idx(typ_i,typ_j)]
                                    && circumsphere_overlap
                                    && test_overlap(r_ij, shape_i, shape_j, counters.overlap_err_count))
                                    {
                                    overlap = true;
                                    break;
                                    }
                                }
                            }
                        }
                    else
                        {
                        // skip ahead
                        cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
                        }
                    if (overlap)
                        break;
                    } // end loop over AABB nodes
                if (overlap)
                    break;
                } // end loop over images
            // whether the move is accepted
            bool accept = !overlap;
            if (!overlap)
                {
                // log of acceptance probability
                Scalar lnb(0.0);
                // nonzero value signals rejection; reduced with max across threads
                unsigned int zero = 0;
                // The trial move is valid. Now generate random depletant particles in a sphere
                // of radius (d_max+d_depletant+move size)/2.0 around the original particle position
                // draw number from Poisson distribution
                unsigned int n = 0;
                if (m_lambda[typ_i] > Scalar(0.0))
                    {
                    n = m_poisson[typ_i](rng_poisson);
                    }
                unsigned int n_overlap_checks = 0;
                unsigned int overlap_err_count = 0;
                unsigned int insert_count = 0;
                unsigned int reinsert_count = 0;
                unsigned int free_volume_count = 0;
                unsigned int overlap_count = 0;
                // 'flag' requests early termination; OpenMP forbids break, so
                // remaining iterations just fall through via continue
                volatile bool flag=false;
                #pragma omp parallel for reduction(+ : lnb, n_overlap_checks, overlap_err_count, insert_count, reinsert_count, free_volume_count, overlap_count) reduction(max: zero) shared(flag) if (n>0) schedule(dynamic)
                for (unsigned int k = 0; k < n; ++k)
                    {
                    if (flag)
                        {
                        #ifndef _OPENMP
                        break;
                        #else
                        continue;
                        #endif
                        }
                    insert_count++;
                    // generate a random depletant coordinate and orientation in the sphere around the new position
                    vec3<Scalar> pos_test;
                    quat<Scalar> orientation_test;
                    #ifdef _OPENMP
                    unsigned int thread_idx = omp_get_thread_num();
                    #else
                    unsigned int thread_idx = 0;
                    #endif
                    generateDepletant(m_rng_depletant[thread_idx], pos_i, h_d_max.data[typ_i], h_d_min.data[typ_i], pos_test,
                        orientation_test, this->m_params[m_type]);
                    Shape shape_test(orientation_test, this->m_params[m_type]);
                    detail::AABB aabb_test_local = shape_test.getAABB(vec3<Scalar>(0,0,0));
                    bool overlap_depletant = false;
                    // Check if the new configuration of particle i generates an overlap
                    for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                        {
                        vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image];
                        detail::AABB aabb = aabb_test_local;
                        aabb.translate(pos_test_image);
                        vec3<Scalar> r_ij = pos_i - pos_test_image;
                        n_overlap_checks++;
                        // check circumsphere overlap
                        OverlapReal rsq = dot(r_ij,r_ij);
                        OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
                        bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
                        if (h_overlaps.data[this->m_overlap_idx(m_type, typ_i)]
                            && circumsphere_overlap
                            && test_overlap(r_ij, shape_test, shape_i, overlap_err_count))
                            {
                            overlap_depletant = true;
                            overlap_count++;
                            break;
                            }
                        }
                    if (overlap_depletant)
                        {
                        // check against overlap with old position
                        bool overlap_old = false;
                        // Check if the old configuration of particle i generates an overlap
                        for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                            {
                            vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image];
                            vec3<Scalar> r_ij = vec3<Scalar>(h_postype.data[i]) - pos_test_image;
                            n_overlap_checks++;
                            // check circumsphere overlap
                            Shape shape_i_old(quat<Scalar>(h_orientation.data[i]), this->m_params[typ_i]);
                            OverlapReal rsq = dot(r_ij,r_ij);
                            OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_i_old.getCircumsphereDiameter();
                            bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
                            if (h_overlaps.data[this->m_overlap_idx(m_type, typ_i)]
                                && circumsphere_overlap
                                && test_overlap(r_ij, shape_test, shape_i_old, overlap_err_count))
                                {
                                overlap_old = true;
                                break;
                                }
                            }
                        if (!overlap_old)
                            {
                            // All image boxes (including the primary)
                            // NOTE(review): shadows the outer n_images with the same value
                            const unsigned int n_images = this->m_image_list.size();
                            for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                                {
                                vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image];
                                detail::AABB aabb = aabb_test_local;
                                aabb.translate(pos_test_image);
                                // stackless search
                                for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
                                    {
                                    if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
                                        {
                                        if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
                                            {
                                            for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
                                                {
                                                // read in its position and orientation
                                                unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);
                                                // we checked ptl i first
                                                if (i == j) continue;
                                                Scalar4 postype_j;
                                                Scalar4 orientation_j;
                                                // load the old position and orientation of the j particle
                                                postype_j = h_postype.data[j];
                                                orientation_j = h_orientation.data[j];
                                                // put particles in coordinate system of particle i
                                                vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_test_image;
                                                unsigned int typ_j = __scalar_as_int(postype_j.w);
                                                Shape shape_j(quat<Scalar>(orientation_j), this->m_params[typ_j]);
                                                n_overlap_checks++;
                                                // check circumsphere overlap
                                                OverlapReal rsq = dot(r_ij,r_ij);
                                                OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
                                                bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
                                                if (h_overlaps.data[this->m_overlap_idx(m_type,typ_j)]
                                                    && circumsphere_overlap
                                                    && test_overlap(r_ij, shape_test, shape_j, overlap_err_count))
                                                    {
                                                    // depletant is ignored for any overlap in the old configuration
                                                    overlap_old = true;
                                                    break;
                                                    }
                                                }
                                            }
                                        }
                                    else
                                        {
                                        // skip ahead
                                        cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
                                        }
                                    if (overlap_old)
                                        break;
                                    } // end loop over AABB nodes
                                if (overlap_old)
                                    break;
                                } // end loop over images
                            }
                        if (!overlap_old)
                            {
                            free_volume_count++;
                            }
                        else
                            {
                            // the depletant overlap doesn't count since it was already overlapping
                            // in the old configuration
                            overlap_depletant = false;
                            }
                        }
                    if (overlap_depletant && !m_n_trial)
                        {
                        zero = 1;
                        // break out of loop
                        flag = true;
                        }
                    else if (overlap_depletant && m_n_trial)
                        {
                        const typename Shape::param_type& params_depletant = this->m_params[m_type];
                        // Number of successful depletant insertions in new configuration
                        unsigned int n_success_new = 0;
                        // Number of allowed insertion trials (those which overlap with colloid at old position)
                        unsigned int n_overlap_shape_new = 0;
                        // diameter (around origin) in which we are guaranteed to intersect with the shape
                        Scalar delta_insphere = Scalar(2.0)*shape_i.getInsphereRadius();
                        // same for old reverse move. Because we have already sampled one successful insertion
                        // that overlaps with the colloid at the new position, we increment by one (super-detailed
                        // balance)
                        unsigned int n_success_old = 1;
                        unsigned int n_overlap_shape_old = 1;
                        Scalar4& postype_i_old = h_postype.data[i];
                        vec3<Scalar> pos_i_old(postype_i_old);
                        quat<Scalar> orientation_i_old(h_orientation.data[i]);
                        for (unsigned int l = 0; l < m_n_trial; ++l)
                            {
                            // generate a random depletant position and orientation
                            // in both the old and the new configuration of the colloid particle
                            vec3<Scalar> pos_depletant_old, pos_depletant_new;
                            quat<Scalar> orientation_depletant_old, orientation_depletant_new;
                            // try moving the overlapping depletant in the excluded volume
                            // such that it overlaps with the particle at the old position
                            generateDepletantRestricted(m_rng_depletant[thread_idx], pos_i_old, h_d_max.data[typ_i], delta_insphere,
                                pos_depletant_new, orientation_depletant_new, params_depletant, pos_i);
                            reinsert_count++;
                            Shape shape_depletant_new(orientation_depletant_new, params_depletant);
                            const typename Shape::param_type& params_i = this->m_params[__scalar_as_int(postype_i_old.w)];
                            bool overlap_shape = false;
                            if (insertDepletant(pos_depletant_new, shape_depletant_new, i, this->m_params.data(), h_overlaps.data, typ_i,
                                h_postype.data, h_orientation.data, pos_i, shape_i.orientation, params_i,
                                n_overlap_checks, overlap_err_count, overlap_shape, false))
                                {
                                n_success_new++;
                                }
                            if (overlap_shape)
                                {
                                // depletant overlaps with colloid at old position
                                n_overlap_shape_new++;
                                }
                            if (l >= 1)
                                {
                                // as above, in excluded volume sphere at new position
                                generateDepletantRestricted(m_rng_depletant[thread_idx], pos_i, h_d_max.data[typ_i], delta_insphere,
                                    pos_depletant_old, orientation_depletant_old, params_depletant, pos_i_old);
                                Shape shape_depletant_old(orientation_depletant_old, params_depletant);
                                if (insertDepletant(pos_depletant_old, shape_depletant_old, i, this->m_params.data(), h_overlaps.data, typ_i,
                                    h_postype.data, h_orientation.data, pos_i, shape_i.orientation, params_i,
                                    n_overlap_checks, overlap_err_count, overlap_shape, true))
                                    {
                                    n_success_old++;
                                    }
                                if (overlap_shape)
                                    {
                                    // depletant overlaps with colloid at new position
                                    n_overlap_shape_old++;
                                    }
                                reinsert_count++;
                                }
                            // NOTE(review): adding the *global* counters into the thread-local
                            // reduction sums every trial looks like double counting -- confirm
                            // against upstream HOOMD-blue before relying on these statistics
                            n_overlap_checks += counters.overlap_checks;
                            overlap_err_count += counters.overlap_err_count;
                            } // end loop over re-insertion attempts
                        if (n_success_new != 0)
                            {
                            lnb += log((Scalar)n_success_new/(Scalar)n_overlap_shape_new);
                            lnb -= log((Scalar)n_success_old/(Scalar)n_overlap_shape_old);
                            }
                        else
                            {
                            zero = 1;
                            // break out of loop
                            flag = true;
                            }
                        } // end if depletant overlap
                    } // end loop over depletants
                // increment counters
                counters.overlap_checks += n_overlap_checks;
                counters.overlap_err_count += overlap_err_count;
                implicit_counters.insert_count += insert_count;
                implicit_counters.free_volume_count += free_volume_count;
                implicit_counters.overlap_count += overlap_count;
                implicit_counters.reinsert_count += reinsert_count;
                // apply acceptance criterion
                if (!zero)
                    {
                    accept = rng_i.f() < exp(lnb);
                    }
                else
                    {
                    accept = false;
                    }
                } // end depletant placement
            // if the move is accepted
            if (accept)
                {
                // increment accept counter and assign new position
                if (!shape_i.ignoreStatistics())
                    {
                    if (move_type_translate)
                        counters.translate_accept_count++;
                    else
                        counters.rotate_accept_count++;
                    }
                // update the position of the particle in the tree for future updates
                detail::AABB aabb = aabb_i_local;
                aabb.translate(pos_i);
                this->m_aabb_tree.update(i, aabb);
                // update position of particle
                h_postype.data[i] = make_scalar4(pos_i.x,pos_i.y,pos_i.z,postype_i.w);
                if (shape_i.hasOrientation())
                    {
                    h_orientation.data[i] = quat_to_scalar4(shape_i.orientation);
                    }
                }
            else
                {
                if (!shape_i.ignoreStatistics())
                    {
                    // increment reject counter
                    if (move_type_translate)
                        counters.translate_reject_count++;
                    else
                        counters.rotate_reject_count++;
                    }
                }
            } // end loop over all particles
        } // end loop over nselect
        {
        ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
        ArrayHandle<int3> h_image(this->m_pdata->getImages(), access_location::host, access_mode::readwrite);
        // wrap particles back into box
        for (unsigned int i = 0; i < this->m_pdata->getN(); i++)
            {
            box.wrap(h_postype.data[i], h_image.data[i]);
            }
        }
    // perform the grid shift
    #ifdef ENABLE_MPI
    if (this->m_comm)
        {
        ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
        ArrayHandle<int3> h_image(this->m_pdata->getImages(), access_location::host, access_mode::readwrite);
        // precalculate the grid shift
        hoomd::detail::Saru rng(timestep, this->m_seed, 0xf4a3210e);
        Scalar3 shift = make_scalar3(0,0,0);
        shift.x = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
        shift.y = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
        if (this->m_sysdef->getNDimensions() == 3)
            {
            shift.z = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
            }
        for (unsigned int i = 0; i < this->m_pdata->getN(); i++)
            {
            // read in the current position and orientation
            Scalar4 postype_i = h_postype.data[i];
            vec3<Scalar> r_i = vec3<Scalar>(postype_i); // translation from local to global coordinates
            r_i += vec3<Scalar>(shift);
            h_postype.data[i] = vec_to_scalar4(r_i, postype_i.w);
            box.wrap(h_postype.data[i], h_image.data[i]);
            }
        this->m_pdata->translateOrigin(shift);
        }
    #endif
    if (this->m_prof) this->m_prof->pop(this->m_exec_conf);
    // migrate and exchange particles
    this->communicate(true);
    // all particle have been moved, the aabb tree is now invalid
    this->m_aabb_tree_invalid = true;
    }
/* \param rng The random number generator
 * \param pos_sphere Center of sphere
 * \param delta Diameter of sphere
 * \param d_min Diameter of smaller sphere excluding depletant
 * \param pos Position of depletant (return value)
 * \param orientation Orientation of depletant (return value)
 * \param params_depletant Depletant parameters
 */
//! Draw a uniformly random depletant position in the spherical shell
//! [d_min/2, delta/2] around pos_sphere; also draws a random orientation
//! when the depletant shape has one.
template<class Shape>
template<class RNG>
inline void IntegratorHPMCMonoImplicit<Shape>::generateDepletant(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta,
    Scalar d_min, vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletant)
    {
    // uniform direction on the unit sphere; the RNG draw order (angle,
    // z-component, radius, orientation) must be preserved for reproducibility
    Scalar phi = rng.template s<Scalar>(Scalar(0.0),Scalar(2.0*M_PI));
    Scalar cos_t = rng.template s<Scalar>(Scalar(-1.0),Scalar(1.0));
    Scalar sin_t = fast::sqrt(Scalar(1.0)-cos_t*cos_t);
    vec3<Scalar> dir(sin_t*fast::cos(phi), sin_t*fast::sin(phi), cos_t);
    // radius sampled uniformly in volume over the shell (cube-root transform)
    Scalar u = rng.template s<Scalar>(fast::pow(d_min/delta,Scalar(3.0)),Scalar(1.0));
    Scalar radius = Scalar(0.5)*delta*fast::pow(u,Scalar(1.0/3.0));
    // orientation only matters for anisotropic depletants
    Shape probe(quat<Scalar>(), params_depletant);
    if (probe.hasOrientation())
        {
        orientation = generateRandomOrientation(rng);
        }
    pos = pos_sphere + radius*dir;
    }
/* Sample a depletant in the region of one sphere while avoiding (or restricting to) another.
 * \param rng The random number generator
 * \param pos_sphere Center of sphere
 * \param delta Diameter of sphere
 * \param delta_other Diameter of the other sphere
 * \param pos Position of depletant (return value)
 * \param orientation Orientation of depletant (return value; only written for anisotropic shapes)
 * \param params_depletant Depletant parameters
 * \param pos_sphere_other Center of the other sphere
 */
template<class Shape>
template<class RNG>
inline void IntegratorHPMCMonoImplicit<Shape>::generateDepletantRestricted(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta,
Scalar delta_other, vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletant,
vec3<Scalar> pos_sphere_other)
{
vec3<Scalar> r_ij = pos_sphere - pos_sphere_other;
Scalar d = fast::sqrt(dot(r_ij,r_ij));
// default: sample the full sphere of diameter delta around pos_sphere
Scalar rmin(0.0);
Scalar rmax = Scalar(0.5)*delta;
Scalar ctheta_min(-1.0);
bool do_rotate = false;
if (d > Scalar(0.0) && delta_other > Scalar(0.0))
    {
    // cosine of the half-angle of the spherical cap where the two spheres intersect
    Scalar ctheta = (delta_other*delta_other+Scalar(4.0)*d*d-delta*delta)/(Scalar(4.0)*delta_other*d);
    if (ctheta >= Scalar(-1.0) && ctheta < Scalar(1.0))
        {
        // true intersection, we can restrict angular sampling
        ctheta_min = ctheta;
        }
    // is there an intersection?
    if (Scalar(2.0)*d < delta+delta_other)
        {
        // sample in a shell around the other (smaller) sphere instead
        rmin = delta_other/Scalar(2.0);
        rmax = d+delta/Scalar(2.0);
        do_rotate = true;
        }
    }
// draw random radial coordinate in a spherical shell (uniform in volume)
Scalar r3 = rng.template s<Scalar>(fast::pow(rmin/rmax,Scalar(3.0)),Scalar(1.0));
Scalar r = rmax*fast::pow(r3,Scalar(1.0/3.0));
// random direction, angularly restricted to cos(theta) in [ctheta_min, 1]
Scalar z = rng.s(ctheta_min,Scalar(1.0));
Scalar phi = Scalar(2.0*M_PI)*rng.template s<Scalar>();
vec3<Scalar> n;
if (do_rotate)
    {
    // build an orthonormal frame about the center-to-center axis u
    vec3<Scalar> u(r_ij/d);
    // normal vector
    vec3<Scalar> v(cross(u,vec3<Scalar>(0,0,1)));
    if (dot(v,v) < EPSILON)
        {
        // u was (anti)parallel to z; use the y axis instead
        v = cross(u,vec3<Scalar>(0,1,0));
        }
    v *= fast::rsqrt(dot(v,v));
    // rotate v by phi about u, then tilt by theta off the u axis
    quat<Scalar> q(quat<Scalar>::fromAxisAngle(u,phi));
    n = z*u+(fast::sqrt(Scalar(1.0)-z*z))*rotate(q,v);
    }
else
    {
    n = vec3<Scalar>(fast::sqrt(Scalar(1.0)-z*z)*fast::cos(phi),fast::sqrt(Scalar(1.0)-z*z)*fast::sin(phi),z);
    }
// test depletant position
pos = r*n;
if (do_rotate)
    {
    // insert such that it potentially intersects the sphere, but not the other one
    pos += pos_sphere_other;
    }
else
    {
    // insert in sphere
    pos += pos_sphere;
    }
Shape shape_depletant(quat<Scalar>(), params_depletant);
if (shape_depletant.hasOrientation())
    {
    orientation = generateRandomOrientation(rng);
    }
}
/*! Test whether a depletant insertion at a given position is "valid": it must overlap
 *  the moved particle in one configuration, overlap nothing in the other configuration,
 *  and overlap no neighboring particle.
 * \param pos_depletant Depletant position
 * \param shape_depletant Depletant shape
 * \param idx Index of updated particle
 * \param params Per-type shape parameters
 * \param h_overlaps Interaction matrix
 * \param typ_i Type of updated particle
 * \param h_postype Position/type array
 * \param h_orientation Orientation array
 * \param pos_new New position of updated particle
 * \param orientation_new New orientation of updated particle
 * \param params_new New shape parameters of updated particle
 * \param n_overlap_checks Counter of overlap checks performed (incremented)
 * \param overlap_err_count Counter of overlap-test errors (incremented)
 * \param overlap_shape Set to true when the depletant overlaps the moved particle
 *        in the configuration opposite to \a new_config (return value)
 * \param new_config If true, test overlap against the OLD configuration first
 * \return true if the insertion counts (overlaps opposite configuration, free otherwise)
 */
template<class Shape>
inline bool IntegratorHPMCMonoImplicit<Shape>::insertDepletant(vec3<Scalar>& pos_depletant,
const Shape& shape_depletant, unsigned int idx, typename Shape::param_type *params, unsigned int *h_overlaps,
unsigned int typ_i, Scalar4 *h_postype, Scalar4 *h_orientation, vec3<Scalar> pos_new, quat<Scalar>& orientation_new,
const typename Shape::param_type& params_new, unsigned int &n_overlap_checks,
unsigned int &overlap_err_count, bool& overlap_shape, bool new_config)
{
overlap_shape=false;
detail::AABB aabb_depletant_local = shape_depletant.getAABB(vec3<Scalar>(0,0,0));
// now check if depletant overlaps with moved particle in the old configuration
Shape shape_i(quat<Scalar>(), params_new);
if (shape_i.hasOrientation())
    {
    if (! new_config)
        {
        // load old orientation
        Scalar4 orientation_i = h_orientation[idx];
        shape_i.orientation = quat<Scalar>(orientation_i);
        }
    else
        {
        shape_i.orientation = orientation_new;
        }
    }
vec3<Scalar> pos_i;
if (!new_config)
    {
    // load old position
    pos_i = vec3<Scalar>(h_postype[idx]);
    }
else
    {
    pos_i = pos_new;
    }
// only need to consider the (0,0,0) image
detail::AABB aabb = aabb_depletant_local;
aabb.translate(pos_depletant);
// put particles in coordinate system of depletant
vec3<Scalar> r_ij = pos_i - pos_depletant;
n_overlap_checks++;
// test circumsphere overlap (cheap rejection before the exact test)
OverlapReal rsq = dot(r_ij,r_ij);
OverlapReal DaDb = shape_depletant.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
if (h_overlaps[this->m_overlap_idx(typ_i, m_type)]
    && circumsphere_overlap && test_overlap(r_ij, shape_depletant, shape_i, overlap_err_count))
    {
    overlap_shape = true;
    }
// same, but for reverse move: swap which configuration (old/new) is loaded
if (shape_i.hasOrientation())
    {
    if (new_config)
        {
        // load old orientation
        Scalar4 orientation_i = h_orientation[idx];
        shape_i.orientation = quat<Scalar>(orientation_i);
        }
    else
        {
        shape_i.orientation = orientation_new;
        }
    }
if (new_config)
    {
    // load old position
    pos_i = vec3<Scalar>(h_postype[idx]);
    }
else
    {
    pos_i = pos_new;
    }
// only need to consider the (0,0,0) image
aabb = aabb_depletant_local;
aabb.translate(pos_depletant);
// put particles in coordinate system of depletant
r_ij = pos_i - pos_depletant;
n_overlap_checks++;
// test circumsphere overlap
rsq = dot(r_ij,r_ij);
DaDb = shape_depletant.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
// check for overlaps with neighboring particle's positions
bool overlap=false;
if (h_overlaps[this->m_overlap_idx(m_type, typ_i)]
    && circumsphere_overlap && test_overlap(r_ij, shape_depletant, shape_i, overlap_err_count))
    {
    // if we are already overlapping in the other configuration, this doesn't count as an insertion
    overlap = true;
    }
if (!overlap && overlap_shape)
    {
    // All image boxes (including the primary)
    const unsigned int n_images = this->m_image_list.size();
    for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
        {
        vec3<Scalar> pos_depletant_image = pos_depletant + this->m_image_list[cur_image];
        detail::AABB aabb = aabb_depletant_local;
        aabb.translate(pos_depletant_image);
        // stackless search of the AABB tree
        for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
            {
            if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
                {
                if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
                    {
                    for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
                        {
                        // read in its position and orientation
                        unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);
                        // load the position and orientation of the j particle
                        Scalar4 postype_j = h_postype[j];
                        vec3<Scalar> pos_j(postype_j);
                        Scalar4 orientation_j = h_orientation[j];
                        unsigned int type = __scalar_as_int(postype_j.w);
                        Shape shape_j(quat<Scalar>(orientation_j), params[type]);
                        if (j == idx)
                            {
                            // we have already excluded overlap with the moved particle above
                            continue;
                            }
                        // put particles in coordinate system of depletant
                        vec3<Scalar> r_ij = pos_j - pos_depletant_image;
                        n_overlap_checks++;
                        // check circumsphere overlap
                        OverlapReal rsq = dot(r_ij,r_ij);
                        OverlapReal DaDb = shape_depletant.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
                        bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);
                        if (h_overlaps[this->m_overlap_idx(type, m_type)]
                            && circumsphere_overlap
                            && test_overlap(r_ij, shape_depletant, shape_j, overlap_err_count))
                            {
                            overlap = true;
                            break;
                            }
                        }
                    }
                }
            else
                {
                // skip ahead past this node's subtree
                cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
                }
            if (overlap)
                break;
            } // end loop over AABB nodes
        if (overlap)
            break;
        } // end loop over images
    } // end if overlap with shape
return overlap_shape && !overlap;
}
/*! Query a named logged quantity.
    \param quantity Name of the log quantity to get
    \param timestep Current time step of the simulation
    \return the requested log quantity.
*/
template<class Shape>
Scalar IntegratorHPMCMonoImplicit<Shape>::getLogValue(const std::string& quantity, unsigned int timestep)
{
// quantities that do not need counter data
if (quantity == "hpmc_fugacity")
    {
    return (Scalar) m_n_R;
    }
if (quantity == "hpmc_ntrial")
    {
    return (Scalar) m_n_trial;
    }
// counters relative to the last executed step (mode 2)
hpmc_counters_t counters = IntegratorHPMC::getCounters(2);
hpmc_implicit_counters_t implicit_counters = getImplicitCounters(2);
const bool have_moves = counters.getNMoves() > 0;
if (quantity == "hpmc_insert_count")
    {
    // number of depletant insertions per colloid move (0 if no moves yet)
    return have_moves
        ? (Scalar)implicit_counters.insert_count/(Scalar)counters.getNMoves()
        : Scalar(0.0);
    }
if (quantity == "hpmc_reinsert_count")
    {
    // number of overlapping depletants reinserted per colloid move (0 if no moves yet)
    return have_moves
        ? (Scalar)implicit_counters.reinsert_count/(Scalar)counters.getNMoves()
        : Scalar(0.0);
    }
if (quantity == "hpmc_free_volume_fraction")
    {
    // fraction of free volume in the depletant insertion sphere
    return (Scalar) implicit_counters.getFreeVolumeFraction();
    }
if (quantity == "hpmc_overlap_fraction")
    {
    // fraction of overlapping depletants after a trial move
    return (Scalar) implicit_counters.getOverlapFraction();
    }
if (quantity == "hpmc_configurational_bias_ratio")
    {
    // acceptance ratio of the configurational bias scheme
    return (Scalar) implicit_counters.getConfigurationalBiasRatio();
    }
// not handled here -> defer to the base class
return IntegratorHPMCMono<Shape>::getLogValue(quantity, timestep);
}
/*! \param mode 0 -> Absolute count, 1 -> relative to the start of the run, 2 -> relative to the last executed step
    \return The current state of the implicit-depletant acceptance counters

    IntegratorHPMCMonoImplicit maintains a count of the number of accepted and rejected moves since
    instantiation. getImplicitCounters() provides the current value. The parameter *mode* controls
    whether the returned counts are absolute, relative to the start of the run, or relative to the
    start of the last executed step. Under MPI the counts are summed over all ranks.
*/
template<class Shape>
hpmc_implicit_counters_t IntegratorHPMCMonoImplicit<Shape>::getImplicitCounters(unsigned int mode)
{
ArrayHandle<hpmc_implicit_counters_t> h_counters(m_implicit_count, access_location::host, access_mode::read);
hpmc_implicit_counters_t result;
switch (mode)
    {
    case 0:
        result = h_counters.data[0];
        break;
    case 1:
        result = h_counters.data[0] - m_implicit_count_run_start;
        break;
    default:
        result = h_counters.data[0] - m_implicit_count_step_start;
        break;
    }
#ifdef ENABLE_MPI
if (this->m_comm)
    {
    // MPI Reduction to total result values on all ranks
    MPI_Allreduce(MPI_IN_PLACE, &result.insert_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
    MPI_Allreduce(MPI_IN_PLACE, &result.free_volume_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
    MPI_Allreduce(MPI_IN_PLACE, &result.overlap_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
    MPI_Allreduce(MPI_IN_PLACE, &result.reinsert_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
    }
#endif
return result;
}
/*! NPT simulations are not supported with implicit depletants
    (The Nmu_ptPT ensemble is unstable)
    \param timestep Current time step (unused; the method always throws)
    \param new_box Proposed box dimensions (unused; the method always throws)
    \returns never returns normally; always throws std::runtime_error
*/
template<class Shape>
bool IntegratorHPMCMonoImplicit<Shape>::attemptBoxResize(unsigned int timestep, const BoxDim& new_box)
{
this->m_exec_conf->msg->error() << "Nmu_pPT simulations are unsupported." << std::endl;
throw std::runtime_error("Error during implicit depletant integration\n");
}
//! Export this hpmc integrator to python
/*! \param m Python module to export into
    \param name Name of the class in the exported python module
    \tparam Shape An instantiation of IntegratorHPMCMono<Shape> will be exported
*/
template < class Shape > void export_IntegratorHPMCMonoImplicit(pybind11::module& m, const std::string& name)
{
// derives from the corresponding IntegratorHPMCMono<Shape> python binding
pybind11::class_<IntegratorHPMCMonoImplicit<Shape>, std::shared_ptr< IntegratorHPMCMonoImplicit<Shape> > >(m, name.c_str(), pybind11::base< IntegratorHPMCMono<Shape> >())
    // constructor: (system definition, seed)
    .def(pybind11::init< std::shared_ptr<SystemDefinition>, unsigned int >())
    .def("setDepletantDensity", &IntegratorHPMCMonoImplicit<Shape>::setDepletantDensity)
    .def("setDepletantType", &IntegratorHPMCMonoImplicit<Shape>::setDepletantType)
    .def("setNTrial", &IntegratorHPMCMonoImplicit<Shape>::setNTrial)
    .def("getNTrial", &IntegratorHPMCMonoImplicit<Shape>::getNTrial)
    .def("getImplicitCounters", &IntegratorHPMCMonoImplicit<Shape>::getImplicitCounters)
    ;
}
//! Export the counters for depletants to python
/*! \param m Python module to export into */
inline void export_hpmc_implicit_counters(pybind11::module& m)
{
pybind11::class_< hpmc_implicit_counters_t >(m, "hpmc_implicit_counters_t")
    // raw counter fields
    .def_readwrite("insert_count", &hpmc_implicit_counters_t::insert_count)
    .def_readwrite("reinsert_count", &hpmc_implicit_counters_t::reinsert_count)
    .def_readwrite("free_volume_count", &hpmc_implicit_counters_t::free_volume_count)
    .def_readwrite("overlap_count", &hpmc_implicit_counters_t::overlap_count)
    // derived ratios
    .def("getFreeVolumeFraction", &hpmc_implicit_counters_t::getFreeVolumeFraction)
    .def("getOverlapFraction", &hpmc_implicit_counters_t::getOverlapFraction)
    .def("getConfigurationalBiasRatio", &hpmc_implicit_counters_t::getConfigurationalBiasRatio)
    ;
}
} // end namespace hpmc
#endif // __HPMC_MONO_IMPLICIT__H__
|
loop-6.c | /* { dg-do run } */
#include <omp.h>
extern void abort (void);
#define LLONG_MAX __LONG_LONG_MAX__
#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
#define INT_MAX __INT_MAX__
/* Scoreboard: 6 loops x 5 slots; arr[l*5+i] counts how many times loop l
   produced its i-th expected iteration value. */
int arr[6 * 5];
/* Atomically record that loop LOOPIDX reached its IDX-th expected value
   (callers run inside an OpenMP parallel region). */
void
set (int loopidx, int idx)
{
#pragma omp atomic
arr[loopidx * 5 + idx]++;
}
/* If VAR equals VAL, record a hit for (LOOPIDX, IDX). The trailing `else`
   makes the statement following a chain of check()s execute only when
   none of the checks matched. */
#define check(var, val, loopidx, idx) \
if (var == (val)) set (loopidx, idx); else
/* Verify loop LOOPIDX executed exactly COUNT iterations: slots 0..count-1
   must hold 1, the rest 0 (note `!=` binds looser than `<`, so the condition
   compares against the 0/1 value of `idx < count`); then reset the slots. */
#define test(loopidx, count) \
for (idx = 0; idx < 5; idx++) \
if (arr[loopidx * 5 + idx] != idx < count) \
abort (); \
else \
arr[loopidx * 5 + idx] = 0
/* Exercise `#pragma omp for schedule(dynamic,1)` on loops whose bounds sit at
   the extremes of long long / unsigned long long range, where naive iteration
   count or induction arithmetic would overflow. Each expected index value is
   recorded via check(); `e = 1` runs only when an unexpected value appears
   (the check() macro ends in `else`). Returns 0; aborts on failure. */
int
test1 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(dynamic,1) nowait
for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(dynamic,1) nowait
for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
/* step larger than the range: exactly one iteration */
#pragma omp for schedule(dynamic,1) nowait
for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(dynamic,1) nowait
for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
/* crosses LLONG_MAX in an unsigned induction variable */
#pragma omp for schedule(dynamic,1) nowait
for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(dynamic,1) nowait
for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
/* verify the exact iteration count of each of the six loops */
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Same overflow-edge loops as test1, but with schedule(guided,1).
   Returns 0; aborts on failure. */
int
test2 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(guided,1) nowait
for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Same overflow-edge loops as test1, but with schedule(static) (no chunk).
   Returns 0; aborts on failure. */
int
test3 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(static) nowait
for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(static) nowait
for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
#pragma omp for schedule(static) nowait
for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(static) nowait
for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
#pragma omp for schedule(static) nowait
for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(static) nowait
for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Same overflow-edge loops as test1, but with schedule(static,1) (cyclic).
   Returns 0; aborts on failure. */
int
test4 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(static,1) nowait
for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Same overflow-edge loops as test1, but with schedule(runtime); the caller
   selects the actual schedule via omp_set_schedule(). Returns 0; aborts on
   failure. */
int
test5 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(runtime) nowait
for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Run every schedule variant; test5 is repeated with several runtime
   schedules. The tests assume long long is exactly twice as wide as int
   (so the INT_MAX-based bounds hit the intended edges); skip otherwise. */
int
main (void)
{
if (2 * sizeof (int) != sizeof (long long))
return 0;
test1 ();
test2 ();
test3 ();
test4 ();
omp_set_schedule (omp_sched_static, 0);
test5 ();
omp_set_schedule (omp_sched_static, 3);
test5 ();
omp_set_schedule (omp_sched_dynamic, 5);
test5 ();
omp_set_schedule (omp_sched_guided, 2);
test5 ();
return 0;
}
|
IJMatrix_parcsr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
 *
 * hypre_IJMatrixCreateParCSR
 *
 * Creates the underlying ParCSR matrix object for an IJ matrix from its
 * row/column partitionings. Partitionings are shifted so the first global
 * row/column is 0. When row and column partitionings are the same pointer,
 * row_starts and col_starts alias the same allocation.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
hypre_ParCSRMatrix *par_matrix;
HYPRE_BigInt *row_starts;
HYPRE_BigInt *col_starts;
HYPRE_Int num_procs;
HYPRE_Int i;
hypre_MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* assumed partition: each rank only stores its own [start, end) pair */
row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
if (hypre_IJMatrixGlobalFirstRow(matrix))
{
/* shift so global numbering starts at 0 */
for (i = 0; i < 2; i++)
{
row_starts[i] = row_partitioning[i] - hypre_IJMatrixGlobalFirstRow(matrix);
}
}
else
{
for (i = 0; i < 2; i++)
{
row_starts[i] = row_partitioning[i];
}
}
if (row_partitioning != col_partitioning)
{
col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
if (hypre_IJMatrixGlobalFirstCol(matrix))
{
for (i = 0; i < 2; i++)
{
col_starts[i] = col_partitioning[i]-hypre_IJMatrixGlobalFirstCol(matrix);
}
}
else
{
for (i = 0; i < 2; i++)
{
col_starts[i] = col_partitioning[i];
}
}
}
else
{
/* square case: share the same starts array */
col_starts = row_starts;
}
par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
hypre_IJMatrixGlobalNumCols(matrix),
row_starts, col_starts, 0, 0, 0);
#else
/* global partition: full (num_procs+1)-entry partitioning on every rank */
row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
if (row_partitioning[0])
{
/* shift so global numbering starts at 0 */
for (i = 0; i < num_procs+1; i++)
{
row_starts[i] = row_partitioning[i]-row_partitioning[0];
}
}
else
{
for (i = 0; i < num_procs+1; i++)
{
row_starts[i] = row_partitioning[i];
}
}
if (row_partitioning != col_partitioning)
{
col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
if (col_partitioning[0])
{
for (i = 0; i < num_procs+1; i++)
{
col_starts[i] = col_partitioning[i]-col_partitioning[0];
}
}
else
{
for (i = 0; i < num_procs+1; i++)
{
col_starts[i] = col_partitioning[i];
}
}
}
else
{
/* square case: share the same starts array */
col_starts = row_starts;
}
/* last entries of the shifted starts are the global dimensions */
par_matrix = hypre_ParCSRMatrixCreate(comm, row_starts[num_procs],
col_starts[num_procs],
row_starts, col_starts, 0, 0, 0);
#endif
hypre_IJMatrixObject(matrix) = par_matrix;
return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetRowSizesParCSR
 *
 * Records the estimated number of nonzeros per local row (sizes[i]) in the
 * auxiliary matrix, creating the auxiliary matrix if it does not exist yet.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
const HYPRE_Int *sizes)
{
HYPRE_Int local_num_rows, local_num_cols, i, *row_space = NULL;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* assumed partition: this rank's range is partitioning[0]..partitioning[1] */
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
HYPRE_Int my_id;
hypre_MPI_Comm_rank(hypre_IJMatrixComm(matrix), &my_id);
local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
/* reuse an existing row_space array if one was already allocated */
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
}
if (!row_space)
{
row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
for (i = 0; i < local_num_rows; i++)
{
row_space[i] = sizes[i];
}
if (!aux_matrix)
{
/* aux matrix takes ownership of row_space */
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;
#if defined(HYPRE_USING_CUDA)
/* on the device path, also track the total user-specified on-proc elements */
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[i];
}
#endif
return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetDiagOffdSizesParCSR
 *
 * Sets diag_i inside the diag part of the ParCSRMatrix and offd_i inside
 * the offd part; requires exact row sizes for diag and offd. Disables the
 * auxiliary-matrix assembly path (NeedAux = 0) since exact sizes are known.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
const HYPRE_Int *diag_sizes,
const HYPRE_Int *offd_sizes)
{
HYPRE_Int local_num_rows, local_num_cols;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* assumed partition: this rank's range is partitioning[0]..partitioning[1] */
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
HYPRE_Int my_id;
hypre_MPI_Comm_rank(hypre_IJMatrixComm(matrix), &my_id);
local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
/* allocate the per-row size arrays on first use, then copy the user data */
if ( hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
{
hypre_AuxParCSRMatrixDiagSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
if ( hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
{
hypre_AuxParCSRMatrixOffdSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
/* exact sizes are known: no auxiliary assembly structures needed */
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOnProcElmtsParCSR
 *
 * Records the user-supplied maximum number of on-processor elements in the
 * auxiliary matrix. Only meaningful on the CUDA path; a no-op otherwise.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA)
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_Int local_num_rows, local_num_cols, my_id;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Comm_rank(comm,&my_id);
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
/* lazily create the auxiliary matrix sized to the local partition */
#ifdef HYPRE_NO_GLOBAL_PARTITION
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif
return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOffProcElmtsParCSR
 *
 * Records the user-supplied maximum number of off-processor elements in the
 * auxiliary matrix, creating the auxiliary matrix if needed.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
HYPRE_Int max_off_proc_elmts)
{
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_Int local_num_rows, local_num_cols, my_id;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Comm_rank(comm,&my_id);
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
/* lazily create the auxiliary matrix sized to the local partition */
#ifdef HYPRE_NO_GLOBAL_PARTITION
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA)
/* the device path tracks the user-specified value separately */
hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixInitializeParCSR
*
* initializes AuxParCSRMatrix and ParCSRMatrix as necessary
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
   /* Initialize using the memory location configured on the global hypre handle. */
   HYPRE_MemoryLocation memory_location = hypre_HandleMemoryLocation(hypre_handle());

   return hypre_IJMatrixInitializeParCSR_v2(matrix, memory_location);
}
HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
   /* Initializes the ParCSRMatrix object and its auxiliary AuxParCSRMatrix
    * (the "translator") so that Set/AddToValues calls can follow.  Behavior
    * depends on whether the matrix has already been assembled. */
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   /* The aux structure is kept on the host when the execution policy for the
    * requested memory location is host, and on the device otherwise. */
   HYPRE_MemoryLocation memory_location_aux =
      hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      /* Not assembled yet: create any missing objects and size the CSR parts. */
      if (!par_matrix)
      {
         hypre_IJMatrixCreateParCSR(matrix);
         par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
      }
      HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
      HYPRE_Int i;
      hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
      hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_ParCSRMatrixNumCols(par_matrix), NULL);
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
      hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
      hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);
      if (memory_location_aux == HYPRE_MEMORY_HOST)
      {
         /* If the user supplied per-row size estimates, turn them into CSR
          * row pointers (prefix sums) and allocate the j/data arrays. */
         if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
         {
            for (i = 0; i < local_num_rows; i++)
            {
               hypre_CSRMatrixI(diag)[i+1] = hypre_CSRMatrixI(diag)[i] + hypre_AuxParCSRMatrixDiagSizes(aux_matrix)[i];
            }
            hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
            hypre_CSRMatrixInitialize(diag);
         }
         if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
         {
            for (i = 0; i < local_num_rows; i++)
            {
               hypre_CSRMatrixI(offd)[i+1] = hypre_CSRMatrixI(offd)[i] + hypre_AuxParCSRMatrixOffdSizes(aux_matrix)[i];
            }
            hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
            hypre_CSRMatrixInitialize(offd);
         }
      }
      if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         /* Values will be inserted directly into the CSR arrays: seed the
          * per-row insertion cursors with the row-start offsets. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < local_num_rows; i++)
         {
            hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
            hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
         }
      }
   }
   else if ( memory_location_aux == HYPRE_MEMORY_HOST )
   {
      /* AB 4/06 - the assemble routine destroys the aux matrix - so we need
         to recreate if initialize is called again
      */
      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix),
                                     hypre_ParCSRMatrixNumCols(par_matrix), NULL);
         hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST;
         hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetRowCountsParCSR
*
* gets the number of columns for rows specified by the user
*
*****************************************************************************/
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
                                            HYPRE_Int       nrows,
                                            HYPRE_BigInt   *rows,
                                            HYPRE_Int      *ncols)
{
   /* For each global row id in rows[0..nrows), stores in ncols[i] the number
    * of stored entries (diag + offd) when the row is owned by this process,
    * and 0 (with an optional warning) otherwise.  Returns hypre_error_flag. */
   HYPRE_BigInt row_index;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int i, my_id, pstart, index;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   pstart = 0;
#else
   pstart = my_id;
#endif
   /* Bug fix: `index` is written by every iteration and must be
    * thread-private.  It was previously missing from the private clause,
    * making it shared -- a data race that could produce wrong counts when
    * compiled with OpenMP. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, row_index, index) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < nrows; i++)
   {
      row_index = rows[i];
      if (row_index >= row_partitioning[pstart] &&
          row_index < row_partitioning[pstart+1])
      {
         /* compute local row number */
         index = (HYPRE_Int)(row_index - row_partitioning[pstart]);
         ncols[i] = diag_i[index+1]-diag_i[index]+offd_i[index+1]-offd_i[index];
      }
      else
      {
         ncols[i] = 0;
         if (print_level)
         {
            hypre_printf ("Warning! Row %b is not on Proc. %d!\n",
                          row_index, my_id);
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetValuesParCSR
*
* gets values of an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
                               HYPRE_Int       nrows,
                               HYPRE_Int      *ncols,
                               HYPRE_BigInt   *rows,
                               HYPRE_BigInt   *cols,
                               HYPRE_Complex  *values)
{
   /* Retrieves values from an assembled IJMatrix.  Two modes:
    *   nrows <  0: "get whole rows" -- for each row, fills cols[] and
    *               values[] with every stored entry and may shrink ncols[]
    *               to the actual row lengths.
    *   nrows >= 0: "get specific entries" -- for each (row, col) pair in
    *               rows/cols, writes the stored value (or 0.0 if the entry
    *               is not stored) into values[].
    * Rows not owned by this process are skipped with an optional warning. */
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt *col_map_offd;
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#endif
   HYPRE_Int i, j, n, ii, indx, pstart;
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, row, col_indx, first;
   HYPRE_Int row_local, row_size;
   HYPRE_Int warning = 0;
   HYPRE_Int *counter;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   /* Values can only be read back after assembly. */
   if (assemble_flag == 0)
   {
      hypre_error_in_arg(1);
      if (print_level)
      {
         hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
      }
   }
   /* col_0..col_n is this rank's owned (diag) global column range;
    * `first` is the global first column used to offset offd column ids. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_starts[0];
   col_n = col_starts[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_starts[my_id];
   col_n = col_starts[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif
   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   diag_data = hypre_CSRMatrixData(diag);
   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   /* offd_j/offd_data/col_map_offd are only dereferenced when entries exist
    * outside the local column range, which requires num_procs > 1. */
   if (num_procs > 1)
   {
      offd_j = hypre_CSRMatrixJ(offd);
      offd_data = hypre_CSRMatrixData(offd);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   }
   if (nrows < 0)
   {
      /* Whole-row mode: negative nrows encodes the row count. */
      nrows = -nrows;
      /* counter[i+1] tracks the output offset after row i; initially a
       * prefix sum of the caller-provided ncols[] capacities. */
      counter = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
      counter[0] = 0;
      for (i=0; i < nrows; i++)
      {
         counter[i+1] = counter[i]+ncols[i];
      }
      indx = 0;
      for (i=0; i < nrows; i++)
      {
         row = rows[i];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            row_size = diag_i[row_local+1] - diag_i[row_local] +
                       offd_i[row_local+1] - offd_i[row_local];
            /* Guard against overrunning the caller's total buffer. */
            if (counter[i]+row_size > counter[nrows])
            {
               hypre_error_in_arg(1);
               if (print_level)
               {
                  hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
               }
            }
            if (ncols[i] < row_size)
            {
               warning = 1;
            }
            /* Copy diag entries (local columns, shifted to global ids) ... */
            for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++)
            {
               cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0;
               values[indx++] = diag_data[j];
            }
            /* ... then offd entries (mapped through col_map_offd). */
            for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++)
            {
               cols[indx] = col_map_offd[offd_j[j]];
               values[indx++] = offd_data[j];
            }
            counter[i+1] = indx;
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
      /* If any requested capacity differed from the actual row size,
       * rewrite ncols[] with the true lengths. */
      if (warning)
      {
         for (i=0; i < nrows; i++)
         {
            ncols[i] = counter[i+1] - counter[i];
         }
         if (print_level)
         {
            hypre_printf ("Warning! ncols has been changed!\n");
         }
      }
      hypre_TFree(counter, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* Specific-entry mode: linear search each requested column within the
       * owning row; missing entries are reported as 0.0. */
      indx = 0;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            for (i=0; i < n; i++)
            {
               /* NOTE(review): col_indx is offset by `first` before being
                * compared against col_0/col_n -- presumably first==0 unless
                * global numbering does not start at 0; verify against the
                * matching logic in SetValues, which compares cols[] directly. */
               col_indx = cols[indx] - first;
               values[indx] = 0.0;
               if (col_indx < col_0 || col_indx > col_n)
               /* search in offd */
               {
                  for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++)
                  {
                     if (col_map_offd[offd_j[j]] == col_indx)
                     {
                        values[indx] = offd_data[j];
                        break;
                     }
                  }
               }
               else /* search in diag */
               {
                  col_indx = col_indx - col_0;
                  for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)col_indx)
                     {
                        values[indx] = diag_data[j];
                        break;
                     }
                  }
               }
               indx++;
            }
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetValuesParCSR
*
* sets values in an IJMatrix before assembly,
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix       *matrix,
                               HYPRE_Int             nrows,
                               HYPRE_Int            *ncols,
                               const HYPRE_BigInt   *rows,
                               const HYPRE_Int      *row_indexes,
                               const HYPRE_BigInt   *cols,
                               const HYPRE_Complex  *values )
{
   /* Sets (overwrites) coefficient values of an IJMatrix.
    *
    * Two regimes:
    *  - matrix already assembled: only existing entries may be overwritten;
    *    trying to set a non-existent entry is an error.
    *  - before assembly: entries are inserted either into per-row auxiliary
    *    arrays (need_aux) or directly into the preallocated CSR structure.
    * row_indexes[ii] gives the starting offset of row ii's data inside
    * cols[]/values[].  Off-processor rows are not handled here (only in
    * AddToValues).  Returns hypre_error_flag. */
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   //HYPRE_Int row_len;
   HYPRE_BigInt col_0, col_n, row;
   HYPRE_Int i, ii, j, n, not_found;
   //HYPRE_Int col_indx, cnt1;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   /*HYPRE_Int current_num_elmts;*/
   /*HYPRE_Int max_off_proc_elmts;*/
   //HYPRE_Int off_proc_i_indx;
   //HYPRE_BigInt *off_proc_i;
   //HYPRE_BigInt *off_proc_j;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   /*HYPRE_Complex *off_proc_data;*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   /* col_0..col_n is the local (diag) global column range; `first` is the
    * global first column used to offset offd searches. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
   }
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int num_cols_offd;
      HYPRE_Int j_offd;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         /* NULL ncols means one value per row. */
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            size = diag_i[row_local+1] - diag_i[row_local] +
                   offd_i[row_local+1] - offd_i[row_local];
            if (n > size) /* Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;
            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* Map the global column id to its offd index. */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* The diagonal entry, if present, is stored first in its row. */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     /* return -1;*/
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] = values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     /* return -1; */
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
      }
   }
   else
   {
      /* Matrix not yet assembled: route entries through the aux matrix. */
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* Growable per-row (j, data) arrays; overflow beyond the
                * current row_space goes into tmp buffers, then the row is
                * reallocated once at the end. */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  /* Overwrite if the column is already present in this row. */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;
               if (tmp_indx)
               {
                  /* Grow the row and append the overflowed entries. */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               /* Fixed preallocated CSR rows: append up to diag_i/offd_i
                * boundaries; overflow is a hard error. */
               HYPRE_BigInt *big_offd_j;
               HYPRE_Int col_j;
               offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* Off-diag columns are stored as global ids (BigJ) until
                   * assembly; allocate the array lazily. */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     col_j = (HYPRE_Int)(cols[indx]-col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               /* Persist the advanced insertion cursors. */
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetConstantValuesParCSR
*
* sets all values in an already assembled IJMatrix to a constant value.
*
*****************************************************************************/
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
                                           HYPRE_Complex value )
{
   /* Overwrite every stored entry of the assembled ParCSR matrix (both the
    * diag and offd blocks) with `value`, on the host. */
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Complex      *diag_data  = hypre_CSRMatrixData(diag);
   HYPRE_Complex      *offd_data  = hypre_CSRMatrixData(offd);
   HYPRE_Int           nnz_diag   = hypre_CSRMatrixNumNonzeros(diag);
   HYPRE_Int           nnz_offd   = hypre_CSRMatrixNumNonzeros(offd);
   HYPRE_Int           k;

   /* Fill the local (diagonal) block. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz_diag; k++)
   {
      diag_data[k] = value;
   }

   /* Fill the off-processor (off-diagonal) block. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz_offd; k++)
   {
      offd_data[k] = value;
   }
}
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex value )
{
   /* Set every stored entry of an already assembled matrix to `value`,
    * dispatching to the device or host implementation.  Calling this on an
    * unassembled matrix is an error. */
   if (!hypre_IJMatrixAssembleFlag(matrix))
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        "Matrix not assembled! Required to set constant values!");
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA)
   if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
   {
      /* Data lives on the device: run the device kernel. */
      hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
      return hypre_error_flag;
   }
#endif

   hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   /* Adds values to coefficients of an IJMatrix (mirror of SetValues, but
    * accumulating with += instead of overwriting).
    *
    * Regimes:
    *  - matrix already assembled: owned rows may only update existing
    *    entries; rows owned by other processes are queued in the aux
    *    matrix's off_proc_* arrays for communication at re-assembly.
    *  - before assembly: owned rows go into the growable aux arrays or
    *    directly into the preallocated CSR structure (need_aux == 0);
    *    non-owned rows are again queued in the off-proc arrays.
    * row_indexes[ii] is the starting offset of row ii's data inside
    * cols[]/values[].  Returns hypre_error_flag. */
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   /* col_0..col_n is the local (diag) global column range; `first` offsets
    * global column ids for offd binary searches. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif
   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int j_offd;
      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         /* NULL ncols means one value per row. */
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            size = diag_i[row_local+1] - diag_i[row_local] +
                   offd_i[row_local+1] - offd_i[row_local];
            if (n > size) /* Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;
            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* Map the global column id to its offd index. */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                     /* return -1; */
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* The diagonal entry, if present, is stored first in its row. */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row */
         else
         {
            /* Queue the contribution in the aux off-proc arrays; it will be
             * communicated to the owning process at (re-)assembly. */
            if (!aux_matrix)
            {
               size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* First off-proc contribution: allocate the queue.
                * off_proc_i holds (row, count) pairs, hence 2x capacity. */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* Grow the queue to make room for this row's n entries. */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature of
               setting on other processors */
            /* off_proc_i[off_proc_i_indx++] = row; */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* Growable per-row (j, data) arrays; entries beyond the current
                * row_space go into tmp buffers, then the row is reallocated
                * once at the end. */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  /* Accumulate if the column is already present in this row. */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;
               if (tmp_indx)
               {
                  /* Grow the row and append the overflowed entries. */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               /* Fixed preallocated CSR rows: append up to diag_i/offd_i
                * boundaries; overflow is a hard error. */
               HYPRE_BigInt *big_offd_j;
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* Off-diag columns are stored as global ids (BigJ) until
                   * assembly; allocate the array lazily. */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1;*/
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               /* Persist the advanced insertion cursors. */
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row */
         else
         {
            /* Queue the contribution in the aux off-proc arrays (same scheme
             * as the assembled branch above). */
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* First off-proc contribution: allocate the queue.
                * off_proc_i holds (row, count) pairs, hence 2x capacity. */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* Grow the queue to make room for this row's n entries. */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   /* Release both the assembled ParCSR object and the auxiliary translator. */
   hypre_ParCSRMatrix    *par_matrix = (hypre_ParCSRMatrix *)    hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   hypre_ParCSRMatrixDestroy(par_matrix);
   hypre_AuxParCSRMatrixDestroy(aux_matrix);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This is for handling set and get values calls to off-proc. entries -
* it is called from matrix assemble. There is an alternate version for
* when the assumed partition is being used.
*
*****************************************************************************/
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_MemoryLocation memory_location,
HYPRE_BigInt *off_proc_i,
HYPRE_BigInt *off_proc_j,
HYPRE_Complex *off_proc_data )
{
/* Ship locally-cached off-processor entries to their owning ranks and add
them into the matrix there.
off_proc_i is a flat list of (row, nnz) pairs - off_proc_i_indx entries in
total, i.e. off_proc_i_indx/2 chunks; off_proc_j / off_proc_data hold the
matching column indices and values, laid out contiguously chunk by chunk.
NOTE(review): max_off_proc_elmts, current_num_elmts and memory_location are
not referenced anywhere in this (global-partition) version - confirm they
are only needed by the assumed-partition variant. */
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_MPI_Request *requests = NULL;
hypre_MPI_Status *status = NULL;
HYPRE_Int i, ii, j, j2, jj, n, row_index = 0;
HYPRE_BigInt row;
HYPRE_Int iii, iid, indx, ip;
HYPRE_Int proc_id, num_procs, my_id;
HYPRE_Int num_sends, num_sends3;
HYPRE_Int num_recvs;
HYPRE_Int num_requests;
HYPRE_Int vec_start, vec_len;
HYPRE_Int *send_procs;
HYPRE_Int *chunks;
HYPRE_BigInt *send_i;
HYPRE_Int *send_map_starts;
HYPRE_Int *dbl_send_map_starts;
HYPRE_Int *recv_procs;
HYPRE_Int *recv_chunks;
HYPRE_BigInt *recv_i;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *dbl_recv_vec_starts;
HYPRE_Int *info;
HYPRE_Int *int_buffer;
HYPRE_Int *proc_id_mem;
HYPRE_BigInt *partitioning;
HYPRE_Int *displs;
HYPRE_Int *recv_buf;
HYPRE_Complex *send_data;
HYPRE_Complex *recv_data;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
partitioning = hypre_IJMatrixRowPartitioning(matrix);
info = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
chunks = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
proc_id_mem = hypre_CTAlloc(HYPRE_Int, off_proc_i_indx/2, HYPRE_MEMORY_HOST);
/* count, per destination rank, the number of elements (info) and the number
of row chunks (chunks) we will send; cache each chunk's owner so the
partition search is done only once per chunk */
j=0;
for (i=0; i < off_proc_i_indx; i++)
{
/* i advances twice per iteration: first the row number, then the count */
row = off_proc_i[i++];
//if (row < 0) row = -row-1;
n = (HYPRE_Int)off_proc_i[i];
proc_id = hypre_FindProc(partitioning,row,num_procs);
proc_id_mem[j++] = proc_id;
info[proc_id] += n;
chunks[proc_id]++;
}
/* determine send_procs and amount of data to be sent */
num_sends = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
num_sends++;
}
}
send_procs = hypre_CTAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
dbl_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
num_sends3 = 3*num_sends;
int_buffer = hypre_CTAlloc(HYPRE_Int, 3*num_sends, HYPRE_MEMORY_HOST);
/* build prefix-sum start arrays: send_map_starts counts 2 header entries
(row, n) per chunk plus the column indices; dbl_send_map_starts counts
the values only. int_buffer packs (proc, #chunks, #elmts) triples for the
allgather below. */
j = 0;
j2 = 0;
send_map_starts[0] = 0;
dbl_send_map_starts[0] = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
send_procs[j++] = i;
send_map_starts[j] = send_map_starts[j-1]+2*chunks[i]+info[i];
dbl_send_map_starts[j] = dbl_send_map_starts[j-1]+info[i];
int_buffer[j2++] = i;
int_buffer[j2++] = chunks[i];
int_buffer[j2++] = info[i];
}
}
hypre_TFree(chunks, HYPRE_MEMORY_HOST);
/* every rank learns which ranks send to whom via an allgather of the
(destination, #chunks, #elmts) triples; info is reused for the counts */
hypre_MPI_Allgather(&num_sends3,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);
displs = hypre_CTAlloc(HYPRE_Int, num_procs+1, HYPRE_MEMORY_HOST);
displs[0] = 0;
for (i=1; i < num_procs+1; i++)
{
displs[i] = displs[i-1]+info[i-1];
}
recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs], HYPRE_MEMORY_HOST);
hypre_MPI_Allgatherv(int_buffer,num_sends3,HYPRE_MPI_INT,recv_buf,info,displs,
HYPRE_MPI_INT,comm);
hypre_TFree(int_buffer, HYPRE_MEMORY_HOST);
hypre_TFree(info, HYPRE_MEMORY_HOST);
/* determine recv procs and amount of data to be received */
num_recvs = 0;
for (j=0; j < displs[num_procs]; j+=3)
{
if (recv_buf[j] == my_id)
{
num_recvs++;
}
}
recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
recv_chunks = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
dbl_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
j2 = 0;
recv_vec_starts[0] = 0;
dbl_recv_vec_starts[0] = 0;
/* scan the gathered triples rank by rank; a triple naming my_id means rank i
will send me recv_buf[j+1] chunks holding recv_buf[j+2] elements */
for (i=0; i < num_procs; i++)
{
for (j=displs[i]; j < displs[i+1]; j+=3)
{
if (recv_buf[j] == my_id)
{
recv_procs[j2] = i;
recv_chunks[j2++] = recv_buf[j+1];
recv_vec_starts[j2] = recv_vec_starts[j2-1]+2*recv_buf[j+1]
+recv_buf[j+2];
dbl_recv_vec_starts[j2] = dbl_recv_vec_starts[j2-1]+recv_buf[j+2];
}
if (j2 == num_recvs)
{
break;
}
}
}
hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
hypre_TFree(displs, HYPRE_MEMORY_HOST);
/* set up data to be sent to send procs */
/* send_i contains for each send proc : row no., no. of elmts and column
indices, send_data contains corresponding values */
send_i = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
send_data = hypre_CTAlloc(HYPRE_Complex, dbl_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
recv_i = hypre_CTAlloc(HYPRE_BigInt, recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
recv_data = hypre_CTAlloc(HYPRE_Complex, dbl_recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
/* pack the chunks; note the *_map_starts arrays are advanced as write
cursors here and restored to true start offsets afterwards */
j=0;
jj=0;
for (i=0; i < off_proc_i_indx; i++)
{
row = off_proc_i[i++];
n = (HYPRE_Int)off_proc_i[i];
/* i is odd here (points at the count), so i/2 is the chunk index */
proc_id = proc_id_mem[i/2];
indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
iii = send_map_starts[indx];
iid = dbl_send_map_starts[indx];
send_i[iii++] = row;
send_i[iii++] = (HYPRE_BigInt) n;
for (ii = 0; ii < n; ii++)
{
send_i[iii++] = off_proc_j[jj];
send_data[iid++] = off_proc_data[jj++];
}
send_map_starts[indx] = iii;
dbl_send_map_starts[indx] = iid;
}
hypre_TFree(proc_id_mem, HYPRE_MEMORY_HOST);
/* shift the cursor arrays back so they are start offsets again */
for (i=num_sends; i > 0; i--)
{
send_map_starts[i] = send_map_starts[i-1];
dbl_send_map_starts[i] = dbl_send_map_starts[i-1];
}
send_map_starts[0] = 0;
dbl_send_map_starts[0] = 0;
num_requests = num_recvs+num_sends;
if (num_requests)
{
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
}
/* exchange the integer data: (row, count, column indices) per chunk */
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_BIG_INT, ip, 0, comm,
&requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_BIG_INT, ip, 0, comm,
&requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
}
/* exchange the matching values */
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = dbl_recv_vec_starts[i];
vec_len = dbl_recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = dbl_send_map_starts[i];
vec_len = dbl_send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
}
hypre_TFree(send_i, HYPRE_MEMORY_HOST);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
hypre_TFree(send_procs, HYPRE_MEMORY_HOST);
hypre_TFree(send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(dbl_send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(recv_procs, HYPRE_MEMORY_HOST);
hypre_TFree(recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(dbl_recv_vec_starts, HYPRE_MEMORY_HOST);
/* unpack each received chunk and add it into the local matrix */
j = 0;
j2 = 0;
for (i=0; i < num_recvs; i++)
{
for (ii=0; ii < recv_chunks[i]; ii++)
{
row = recv_i[j];
HYPRE_Int rcvi = (HYPRE_Int) recv_i[j+1];
hypre_IJMatrixAddToValuesParCSR(matrix,1,&rcvi,&row,&row_index,
&recv_i[j+2],&recv_data[j2]);
j2 += recv_i[j+1];
j += recv_i[j+1]+2;
}
}
hypre_TFree(recv_chunks, HYPRE_MEMORY_HOST);
hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
hypre_TFree(recv_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
#else
/* assumed partition version */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_MemoryLocation memory_location,
HYPRE_BigInt *off_proc_i,
HYPRE_BigInt *off_proc_j,
HYPRE_Complex *off_proc_data )
{
/* Assumed-partition version: ship locally-cached off-processor entries to
their owning ranks via two hypre_DataExchangeList rounds - one to resolve
the actual owner of each row range, one to deliver the packed data.
off_proc_i is a flat list of (row, nnz) pairs; off_proc_j / off_proc_data
hold the matching column indices and values. */
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int i, j, k, in_i;
HYPRE_Int myid;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt global_first_col;
HYPRE_BigInt global_first_row;
HYPRE_Int ex_num_contacts = 0, num_rows = 0;
HYPRE_BigInt range_start, range_end;
HYPRE_Int num_elements;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_BigInt row;
HYPRE_Int num_ranges, row_index = 0;
HYPRE_Int num_recvs;
HYPRE_BigInt upper_bound;
HYPRE_Int counter;
HYPRE_Int num_real_procs;
HYPRE_Int /*current_proc,*/ original_proc_indx;
HYPRE_BigInt *row_list=NULL;
HYPRE_Int *row_list_num_elements=NULL;
HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
HYPRE_BigInt *ex_contact_buf = NULL;
HYPRE_Int *recv_starts=NULL;
HYPRE_BigInt *response_buf = NULL;
HYPRE_Int *response_buf_starts=NULL;
HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
HYPRE_Int *argsort_contact_procs = NULL;
HYPRE_Int obj_size_bytes, complex_size;
HYPRE_BigInt big_int_size;
HYPRE_Int tmp_int;
HYPRE_BigInt tmp_big_int;
HYPRE_BigInt *col_ptr;
HYPRE_BigInt *big_int_data = NULL;
HYPRE_Int big_int_data_size = 0, complex_data_size = 0;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_Complex *col_data_ptr;
HYPRE_Complex *complex_data = NULL;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
/* device input: stage everything on the host; each device entry becomes its
own (row, 1) chunk, so off_proc_i_h is twice current_num_elmts long */
if (memory_location == HYPRE_MEMORY_DEVICE)
{
HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2*current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST);
hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
for (i = 0; i < current_num_elmts; i++)
{
off_proc_i_h[2*i] = tmp[i];
off_proc_i_h[2*i+1] = 1;
}
off_proc_i_indx = current_num_elmts * 2;
off_proc_i = off_proc_i_h;
off_proc_j = off_proc_j_h;
off_proc_data = off_proc_data_h;
hypre_TFree(tmp, HYPRE_MEMORY_HOST);
}
/* call hypre_IJMatrixAddToValuesParCSR directly inside this function
* with one chunk of data */
HYPRE_Int off_proc_nelm_recv_cur = 0;
HYPRE_Int off_proc_nelm_recv_max = 0;
HYPRE_BigInt *off_proc_i_recv = NULL;
HYPRE_BigInt *off_proc_j_recv = NULL;
HYPRE_Complex *off_proc_data_recv = NULL;
HYPRE_BigInt *off_proc_i_recv_d = NULL;
HYPRE_BigInt *off_proc_j_recv_d = NULL;
HYPRE_Complex *off_proc_data_recv_d = NULL;
/* off_proc_i holds (row, nnz) pairs, hence the halving */
num_rows = off_proc_i_indx/2;
/* verify that we have created the assumed partition */
if (hypre_IJMatrixAssumedPart(matrix) == NULL)
{
hypre_IJMatrixCreateAssumedPartition(matrix);
}
apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
/*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
}
apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
/* get the assumed processor id for each row */
if (num_rows > 0 )
{
for (i=0; i < num_rows; i++)
{
row = off_proc_i[i*2];
//if (row < 0) row = -row - 1;
row_list[i] = row;
row_list_num_elements[i] = off_proc_i[i*2+1];
/* NOTE(review): global_first_row is paired with global_num_cols here -
looks asymmetric for a row lookup; confirm against the callee's contract */
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_cols, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows -1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i=1; i < num_rows; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
/* now we will go through a create a contact list - need to contact assumed
processors and find out who the actual row owner is - we will contact with
a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST);
ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST);
counter = 0;
range_end = -1;
for (i=0; i< num_rows; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0)
{
ex_contact_buf[counter*2 - 1] = row_list[i-1];
}
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter*2;
ex_contact_buf[counter*2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
&range_start, &range_end);
}
}
/* finish the starts */
ex_contact_vec_starts[counter] = counter*2;
/* finish the last range */
if (counter > 0)
{
ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1];
}
/* don't allocate space for responses */
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
/* first exchange: ask each assumed owner which ranks actually own our ranges */
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by a range upper bound */
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges/2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i=0; i<num_ranges; i++)
{
upper_bound = response_buf[i*2+1];
counter = 0;
tmp_id = response_buf[i*2];
/* loop through row_list entries - counting how many are in the range */
while (j < num_rows && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
/* now we have the list of real processor ids (real_proc_id) - and the number
of distinct ones - so now we can set up data to be sent - we have
HYPRE_Int data and HYPRE_Complex data. that we will need to pack
together */
/* first find out how many rows and elements we need to send per proc - so we
can do storage */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
counter = 0;
if (num_real_procs > 0 )
{
/* orig_order maps sorted position back to the original chunk index, which is
what row_list_num_elements is indexed by */
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
num_elements_total[0] = row_list_num_elements[orig_order[0]];
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i=1; i < num_rows; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
num_elements_total[counter] += row_list_num_elements[orig_order[i]];
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
num_elements_total[counter] = row_list_num_elements[orig_order[i]];
}
}
}
/* to pack together, we need to use the largest obj. size of
(HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are
wasting some storage, but I do not think that it will be a
large amount since this function should not be used on really
large amounts of data anyway*/
big_int_size = sizeof(HYPRE_BigInt);
complex_size = sizeof(HYPRE_Complex);
obj_size_bytes = hypre_max(big_int_size, complex_size);
/* set up data to be sent to send procs */
/* for each proc, ex_contact_buf contains #rows, row #,
no. elements, col indices, col data, row #, no. elements, col
indices, col data, etc. */
/* first calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
ex_contact_vec_starts[0] = -1;
for (i=0; i < num_real_procs; i++)
{
storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i];
ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
}
hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);
/*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST);
index_ptr = void_contact_buf; /* step through with this index */
/* for each proc: #rows, row #, no. elements,
col indices, col data, row #, no. elements, col indices, col data, etc. */
/* un-sort real_proc_id - we want to access data arrays in order, so
cheaper to do this*/
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
for (i=0; i < num_rows; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
counter = 0; /* index into data arrays */
prev_id = -1;
for (i=0; i < num_rows; i++)
{
proc_id = us_real_proc_id[i];
/* can't use row list[i] - you lose the negative signs that differentiate
add/set values */
row = off_proc_i[i*2];
num_elements = row_list_num_elements[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in_i = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in_i < 0)
{
in_i = -in_i - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* add row # */
hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* add number of elements */
hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* now add col indices */
for (j=0; j< num_elements; j++)
{
tmp_big_int = off_proc_j[counter+j]; /* col number */
hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i ++;
}
/* now add data */
for (j=0; j< num_elements; j++)
{
tmp_complex = off_proc_data[counter++]; /* value */
hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* increment the indexes to keep track of where we are - we
* adjust below to be actual starts*/
ex_contact_vec_starts[indx] = in_i;
}
/* some clean up */
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
hypre_TFree(row_list, HYPRE_MEMORY_HOST);
hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
/* the vec_starts were used as cursors; shift back to real start offsets */
for (i=num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
/* first get the integer info in send_proc_obj */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
/*estimate inital storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
/* second exchange: deliver the packed (rows, counts, cols, values) buffers */
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 2,
comm, (void **) &response_buf, &response_buf_starts);
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
/* Now we can unpack the send_proc_objects and call set
and add to values functions. We unpack messages in a
deterministic order, using processor rank */
num_recvs = send_proc_obj.length;
argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
for(i=0; i < num_recvs; i++)
{
argsort_contact_procs[i] = i;
}
/* This sorts the id array, but the original indices are stored in
* argsort_contact_procs */
hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 );
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
for (i=0; i < num_recvs; i++)
{
/* Find the current processor in order, and reset recv_data_ptr to that processor's message */
original_proc_indx = argsort_contact_procs[i];
/*current_proc = send_proc_obj.id[i];*/
indx = recv_starts[original_proc_indx];
recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes);
/* get the number of rows for this recv */
hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j=0; j < num_rows; j++) /* for each row: unpack info */
{
/* row # */
hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* num elements for this row */
hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* col indices */ /* Need to check this again !!!! */
/* when the packing slot width equals the element size we can alias the
buffer in place; otherwise copy element by element into a scratch array */
if (big_int_size == obj_size_bytes)
{
col_ptr = (HYPRE_BigInt *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (big_int_data_size < num_elements)
{
big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k=0; k< num_elements; k++)
{
hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_ptr = big_int_data;
}
/* col data */
if (complex_size == obj_size_bytes)
{
col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (complex_data_size < num_elements)
{
complex_data =
hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k=0; k< num_elements; k++)
{
hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_data_ptr = complex_data;
}
if (memory_location == HYPRE_MEMORY_HOST)
{
hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr);
}
else
{
/* device path: accumulate received entries on the host; they are pushed
to the device in one shot after the unpack loops (growth factor 2) */
HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements;
if (nelm_new > off_proc_nelm_recv_max)
{
off_proc_nelm_recv_max = nelm_new * 2;
off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
}
HYPRE_Int i;
for (i = 0; i < num_elements; i++)
{
off_proc_i_recv[off_proc_nelm_recv_cur + i] = row;
}
hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex, num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
off_proc_nelm_recv_cur = nelm_new;
}
indx += (num_elements*2);
}
}
/* copy the accumulated entries to the device and apply them in one call */
if (memory_location == HYPRE_MEMORY_DEVICE)
{
off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
#if defined(HYPRE_USING_CUDA)
hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d, NULL, off_proc_j_recv_d,
off_proc_data_recv_d, "add");
#endif
}
hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);
if (big_int_data)
{
hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
}
if (complex_data)
{
hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
}
/* the host staging copies made for device input are owned here */
if (memory_location == HYPRE_MEMORY_DEVICE)
{
hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);
return hypre_error_flag;
}
#endif
/*--------------------------------------------------------------------
* hypre_FillResponseIJOffProcVals
* Fill response function for the previous function (2nd data exchange)
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_FillResponseIJOffProcVals(void *p_recv_contact_buf,
HYPRE_Int contact_size,
HYPRE_Int contact_proc,
void *ro,
MPI_Comm comm,
void **p_send_response_buf,
HYPRE_Int *response_message_size )
{
/* Fill-response callback for the second data exchange: append the incoming
contact buffer to the send_proc_obj accumulator (growing its bookkeeping
and element storage as needed) and return an empty confirmation. */
HYPRE_Int rank;
HYPRE_Int num_contacts, start_index, new_capacity;
HYPRE_Int unit_size;
void *dest;
hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*) ro;
hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;

/* elements are stored in slots wide enough for either a big int or a complex */
unit_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));

hypre_MPI_Comm_rank(comm, &rank);

/* grow the per-contact bookkeeping arrays (vec_starts, id) when full */
if (send_proc_obj->length == send_proc_obj->storage_length)
{
send_proc_obj->storage_length += 20;
send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
if (send_proc_obj->id != NULL)
{
send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
}
}

/* where this contact's data will start in v_elements */
num_contacts = send_proc_obj->length;
start_index = send_proc_obj->vec_starts[num_contacts];
if (send_proc_obj->id != NULL)
{
send_proc_obj->id[num_contacts] = contact_proc;
}

/* grow element storage if this message does not fit */
if (send_proc_obj->element_storage_length < start_index + contact_size)
{
new_capacity = start_index + hypre_max(contact_size, 100);
send_proc_obj->v_elements = hypre_TReAlloc((char*)send_proc_obj->v_elements,
char, new_capacity*unit_size, HYPRE_MEMORY_HOST);
send_proc_obj->element_storage_length = new_capacity;
}

/* append the raw contact bytes and record the new end offset */
dest = (void *) ((char *) send_proc_obj->v_elements + start_index*unit_size);
hypre_TMemcpy(dest, p_recv_contact_buf, char, unit_size*contact_size, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
send_proc_obj->vec_starts[num_contacts+1] = start_index + contact_size;
send_proc_obj->length++;

/* output - no message to return (confirmation) */
*response_message_size = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------*/
/* Return the index p such that list[p] <= value < list[p+1], or -1 when
value lies outside [list[0], list[list_length]).
The list must be sorted ascending and contain list_length+1 entries
(callers pass a row partitioning array with num_procs+1 bounds, so the
list[high] read below is in range).
Fixes: the midpoint is now computed as low + (high - low)/2, which cannot
overflow, and the trailing 'else if (value >= list[m])' - which merely
re-tested the negation of the 'if' and obscured the loop-progress
guarantee - is a plain 'else'. */
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
HYPRE_Int low = 0;
HYPRE_Int high = list_length;
HYPRE_Int m;
/* out of range */
if (value >= list[high] || value < list[low])
{
return -1;
}
/* binary search; invariant: list[low] <= value < list[high] */
while (low + 1 < high)
{
m = low + (high - low) / 2; /* overflow-safe midpoint */
if (value < list[m])
{
high = m;
}
else
{
low = m;
}
}
return low;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleParCSR
*
* assembles IJMatrix from AuxParCSRMatrix auxiliary structure
*****************************************************************************/
/* Assemble the IJMatrix into its final ParCSR form:
 *   1. ship any off-processor contributions to their owner ranks,
 *   2. move row data out of the auxiliary structure (if one was used)
 *      into the diag/offd CSR arrays of the ParCSRMatrix,
 *   3. place the diagonal entry first in each diag row,
 *   4. build col_map_offd (sorted global column ids for offd) and
 *      compress big_offd_j into local offd_j indices.
 * Contains MPI collectives, so every rank in comm must call this.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *diag_j;
HYPRE_Int *offd_j = NULL;
HYPRE_Complex *diag_data;
HYPRE_Complex *offd_data = NULL;
HYPRE_Int i, j, j0;
HYPRE_Int num_cols_offd;
HYPRE_Int *diag_pos;
HYPRE_BigInt *col_map_offd;
HYPRE_Int *row_length;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int my_id, num_procs;
HYPRE_Int num_rows;
HYPRE_Int i_diag, i_offd;
HYPRE_BigInt col_0, col_n;
HYPRE_Int nnz_offd;
HYPRE_BigInt *big_offd_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex temp;
/* base: global index of this rank's first column; subtracted from
 * col_map_offd entries at the end so the map is 0-based. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
#else
HYPRE_BigInt base = col_partitioning[0];
#endif
HYPRE_Int off_proc_i_indx;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int current_num_elmts;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int offd_proc_elmts;
//HYPRE_Int new_off_proc_i_indx;
//HYPRE_Int cancel_indx;
//HYPRE_Int col_indx;
//HYPRE_Int current_indx;
//HYPRE_Int current_i;
//HYPRE_Int row_len;
HYPRE_Int max_num_threads;
HYPRE_Int aux_flag, aux_flag_global;
max_num_threads = hypre_NumThreads();
/* first find out if anyone has an aux_matrix, and create one if you don't
* have one, but other procs do */
aux_flag = 0;
aux_flag_global = 0;
if (aux_matrix)
{
aux_flag = 1;
}
/* collective: the SUM is nonzero iff at least one rank has an aux matrix */
hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
if (aux_flag_global && (!aux_flag))
{
/* create an empty aux matrix (NeedAux = 0) so this rank can still
 * participate in the off-processor exchange below */
hypre_MPI_Comm_rank(comm, &my_id);
num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
if (aux_matrix)
{
/* first delete all cancelled elements */
/*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
if (cancel_indx)
{
current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
col_indx = 0;
current_i = 0;
current_indx = 0;
new_off_proc_i_indx = off_proc_i_indx;
for (i=0; i < off_proc_i_indx; i= i+2)
{
row_len = off_proc_i[i+1];
for (j=0; j < off_proc_i[i+1]; j++)
{
if (off_proc_j[col_indx] == -1)
{
col_indx++;
row_len--;
current_num_elmts--;
}
else
{
off_proc_j[current_indx] = off_proc_j[col_indx];
off_proc_data[current_indx++] = off_proc_data[col_indx++];
}
}
if (row_len)
{
off_proc_i[current_i] = off_proc_i[i];
off_proc_i[current_i+1] = row_len;
current_i += 2;
}
else
{
new_off_proc_i_indx -= 2;
}
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}*/
/* collective check whether ANY rank holds off-processor entries;
 * if so, all ranks must enter the exchange routine together */
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
hypre_MPI_SUM, comm);
if (offd_proc_elmts)
{
max_off_proc_elmts=hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
hypre_IJMatrixAssembleOffProcValsParCSR(
matrix,off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
HYPRE_MEMORY_HOST,
off_proc_i, off_proc_j, off_proc_data);
}
}
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* col_0 .. col_n is this rank's owned (diag) global column range */
#ifdef HYPRE_NO_GLOBAL_PARTITION
num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
#else
num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
col_0 = col_partitioning[my_id];
col_n = col_partitioning[my_id+1]-1;
#endif
/* move data into ParCSRMatrix if not there already */
if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
/* per-thread diag/offd entry counts, later turned into prefix sums */
HYPRE_Int *diag_array, *offd_array;
diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
/* diag_pos[i]: position of the diagonal entry within aux row i, or -1 */
diag_pos = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
i_diag = 0;
i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
{
HYPRE_BigInt *local_j;
HYPRE_Complex *local_data;
HYPRE_Int rest, size, ns, ne;
HYPRE_Int num_threads, my_thread_num;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
/* split rows into contiguous chunks [ns, ne) per thread; the first
 * `rest` threads take one extra row */
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size + 1);
ne = (my_thread_num+1)*(size + 1);
}
else
{
ns = my_thread_num*size + rest;
ne = (my_thread_num+1)*size + rest;
}
/* pass 1: count diag/offd entries in this thread's rows and
 * remember where the diagonal element sits in each row */
i_diag = 0;
i_offd = 0;
for (i=ns; i < ne; i++)
{
local_j = aux_j[i];
local_data = aux_data[i];
diag_pos[i] = -1;
for (j=0; j < row_length[i]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
i_offd++;
}
else
{
i_diag++;
if ((HYPRE_Int)(local_j[j]-col_0) == i)
{
diag_pos[i] = j;
}
}
}
}
diag_array[my_thread_num] = i_diag;
offd_array[my_thread_num] = i_offd;
/* barrier: all counts must be published before thread 0 sums them */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
/* thread 0: inclusive prefix sums over per-thread counts, then
 * (re)allocate the final CSR arrays at the exact total sizes */
i_diag = 0;
i_offd = 0;
for (i = 0; i < num_threads; i++)
{
i_diag += diag_array[i];
i_offd += offd_array[i];
diag_array[i] = i_diag;
offd_array[i] = i_offd;
}
diag_i[num_rows] = i_diag;
offd_i[num_rows] = i_offd;
hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
}
/* barrier: arrays must exist before any thread writes into them */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* each thread starts filling at the end of the previous threads' share */
if (my_thread_num)
{
i_diag = diag_array[my_thread_num-1];
i_offd = offd_array[my_thread_num-1];
}
else
{
i_diag = 0;
i_offd = 0;
}
/* pass 2: copy aux rows into CSR, emitting the diagonal entry first */
for (i=ns; i < ne; i++)
{
diag_i[i] = i_diag;
offd_i[i] = i_offd;
local_j = aux_j[i];
local_data = aux_data[i];
if (diag_pos[i] > -1)
{
diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
diag_data[i_diag++] = local_data[diag_pos[i]];
}
for (j=0; j < row_length[i]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
/* offd keeps global column ids for now; compressed below */
big_offd_j[i_offd] = local_j[j];
offd_data[i_offd++] = local_data[j];
}
else if (j != diag_pos[i])
{
diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
diag_data[i_diag++] = local_data[j];
}
}
}
} /* end parallel region */
hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(offd_array, HYPRE_MEMORY_HOST);
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_data;
hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
if (offd_i[num_rows] > 0)
{
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixBigJ(offd) = big_offd_j;
hypre_CSRMatrixData(offd) = offd_data;
}
hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
}
else
{
/* data already lives in the ParCSRMatrix (NeedAux == 0) */
/* move diagonal element into first space */
big_offd_j = hypre_CSRMatrixBigJ(offd);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
for (i = 0; i < num_rows; i++)
{
j0 = diag_i[i];
for (j=j0; j < diag_i[i+1]; j++)
{
if (diag_j[j] == i)
{
/* swap the diagonal entry into the first slot of the row */
temp = diag_data[j0];
diag_data[j0] = diag_data[j];
diag_data[j] = temp;
diag_j[j] = diag_j[j0];
diag_j[j0] = i;
break;
}
}
}
offd_j = hypre_CSRMatrixJ(offd);
if (!offd_j && offd_i[num_rows])
{
offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixJ(offd) = offd_j;
}
}
/* generate the nonzero rows inside offd and diag by calling */
hypre_CSRMatrixSetRownnz(diag);
hypre_CSRMatrixSetRownnz(offd);
/* generate col_map_offd */
nnz_offd = offd_i[num_rows];
if (nnz_offd)
{
/* sort a copy of the global offd column ids and keep the unique ones */
tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
for (i=0; i < nnz_offd; i++)
{
tmp_j[i] = big_offd_j[i];
}
hypre_BigQsort0(tmp_j,0,nnz_offd-1);
num_cols_offd = 1;
for (i=0; i < nnz_offd-1; i++)
{
if (tmp_j[i+1] > tmp_j[i])
{
tmp_j[num_cols_offd++] = tmp_j[i+1];
}
}
col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
{
col_map_offd[i] = tmp_j[i];
}
/* translate each global offd column id into its local index */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i)
#endif
for (i=0; i < nnz_offd; i++)
{
offd_j[i]=hypre_BigBinarySearch(col_map_offd,big_offd_j[i],num_cols_offd);
}
if (base)
{
for (i=0; i < num_cols_offd; i++)
{
col_map_offd[i] -= base;
}
}
hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
hypre_CSRMatrixNumCols(offd) = num_cols_offd;
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
/* big_offd_j is no longer needed once offd_j holds local indices */
hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = NULL;
}
hypre_IJMatrixAssembleFlag(matrix) = 1;
}
/* the auxiliary structure is consumed by assembly; drop it */
hypre_AuxParCSRMatrixDestroy(aux_matrix);
hypre_IJMatrixTranslator(matrix) = NULL;
return hypre_error_flag;
}
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly,
* use of this routine requires that the values in rows are different from each
* other, i.e rows[i] != rows[j] for i != j
* to ensure accurate threading
*
*****************************************************************************/
/* Set (overwrite) values in an IJMatrix before final assembly, threaded
 * over the caller's row list.
 *
 * matrix      : the IJMatrix being filled
 * nrows       : number of rows addressed (must be >= 0)
 * ncols       : ncols[ii] entries for row ii; NULL means 1 entry per row
 * rows        : global row indices; per the header comment above, entries
 *               must be pairwise distinct so threads never touch the same
 *               row concurrently
 * row_indexes : row_indexes[ii] is the offset of row ii's data in cols/values
 * cols        : global column indices
 * values      : values to store (replace, not add)
 *
 * Two regimes: if the matrix is already assembled, only existing nonzeros
 * may be overwritten (missing entries raise HYPRE_ERROR_GENERIC); if not
 * assembled, entries are inserted into the aux structure (grown as needed)
 * or directly into the preallocated CSR arrays.  Rows owned by other
 * processes are skipped here (the off-proc cancel logic is commented out).
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
//HYPRE_Int cancel_indx;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int pstart;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
//HYPRE_Int off_proc_i_indx;
//HYPRE_BigInt *off_proc_i;
//HYPRE_BigInt *off_proc_j;
//HYPRE_Int *offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
//HYPRE_Int max_num_threads;
/* error_flag counts failures across threads (bumped under omp atomic);
 * hypre_error() records the error code for the library */
HYPRE_Int error_flag = 0;
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
//max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
//offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
/* col_0 .. col_n: this rank's owned (diag) global column range;
 * first: global first column, used to shift offd column ids;
 * pstart: index of this rank's slot in the partitioning arrays */
#ifdef HYPRE_NO_GLOBAL_PARTITION
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
#else
col_0 = col_partitioning[my_id];
col_n = col_partitioning[my_id+1]-1;
first = col_partitioning[0];
pstart = my_id;
#endif
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
{
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
}
return hypre_error_flag;
}
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_cols_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
/* split the nrows input rows into contiguous chunks [ns, ne) per
 * thread; safe only because rows[] entries are pairwise distinct */
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
/* assembled matrix cannot grow: more values than nonzeros */
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
/* map global column to local offd index; -1 => not present */
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
/* assembly put the diagonal entry first in the diag row */
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* processor does not own the row */
//else /*search for previous occurrences and cancel them */
/*{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1;
offproc_cnt[my_thread_num]++; */
/*cancel_indx++;*/
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/*}
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
//}
//}
}
} /*end parallel region */
}
else /* matrix not assembled */
{
// fprintf(stderr, "from else\n");
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
/* NOTE(review): aux_matrix is dereferenced unconditionally below —
 * the unassembled path appears to assume it always exists; confirm
 * callers guarantee this. */
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
/* aux storage: one growable (j, data) array pair per local row */
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
/* no aux rows: insert straight into the preallocated CSR arrays */
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
offd_data = hypre_CSRMatrixData(offd);
big_offd_j = hypre_CSRMatrixBigJ(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
/* same contiguous per-thread row split as the assembled branch */
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
/* size: free slots left in the row's current allocation */
size = space - old_size;
if (size < n)
{
/* overflow entries are staged in tmp arrays, merged after */
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
/* overwrite an existing column if present ... */
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
/* ... otherwise append in place or into the tmp overflow */
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
/* grow the aux row and append the staged overflow entries */
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
/* Indx arrays track how far each CSR row is filled so far */
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
/* preallocated row is full; CSR rows cannot grow here */
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
/* persist fill positions for subsequent Set/AddTo calls */
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* processor does not own the row */
/*else
{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1; */
/*cancel_indx++;*/
//offproc_cnt[my_thread_num]++;
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/* }
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
/*}
}*/
}
} /* end parallel region */
}
/*if (error_flag)
{
return hypre_error_flag;
}
if (aux_matrix)
{
for (i1=0; i1 < max_num_threads; i1++)
{
cancel_indx += offproc_cnt[i1];
}
hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
}*/
//hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int pstart;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int **offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
HYPRE_Int i1;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
for (i1=0; i1 < max_num_threads; i1++)
offproc_cnt[i1] = NULL;
#ifdef HYPRE_NO_GLOBAL_PARTITION
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
#else
col_0 = col_partitioning[my_id];
col_n = col_partitioning[my_id+1]-1;
first = col_partitioning[0];
pstart = my_id;
#endif
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int *my_offproc_cnt = NULL;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* not my row */
/* need to find solution for threaded version!!!! */
/* could save row number and process later .... */
else
{
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /* end parallel region */
}
/* not assembled */
else
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_Int i, j, ii, n;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
HYPRE_Int *my_offproc_cnt = NULL;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /*end parallel region */
}
if (error_flag)
{
return hypre_error_flag;
}
if (!aux_matrix)
{
HYPRE_Int size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
for (i1 = 0; i1 < max_num_threads; i1++)
{
if (offproc_cnt[i1])
{
HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
HYPRE_Int i, i2, ii, n, indx;
HYPRE_BigInt row;
for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2)
{
ii = my_offproc_cnt[i2];
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = my_offproc_cnt[i2+1];
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n,1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3*n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i=0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}
hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
}
}
hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
|
gimplify.c | /* Tree lowering pass. This pass converts the GENERIC functions-as-trees
tree representation into the GIMPLE form.
Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Major work done by Sebastian Pop <s.pop@laposte.net>,
Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "varray.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "diagnostic.h"
#include "langhooks.h"
#include "langhooks-def.h"
#include "tree-flow.h"
#include "cgraph.h"
#include "timevar.h"
#include "except.h"
#include "hashtab.h"
#include "flags.h"
#include "real.h"
#include "function.h"
#include "output.h"
#include "expr.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "optabs.h"
#include "pointer-set.h"
#include "splay-tree.h"
#include "vec.h"
#include "gimple.h"
/* Per-decl data-sharing flags recorded while gimplifying OpenMP
   constructs; several bits may be OR'ed together in a context's
   splay tree.  */
enum gimplify_omp_var_data
{
  GOVD_SEEN = 1,
  GOVD_EXPLICIT = 2,
  GOVD_SHARED = 4,
  GOVD_PRIVATE = 8,
  GOVD_FIRSTPRIVATE = 16,
  GOVD_LASTPRIVATE = 32,
  GOVD_REDUCTION = 64,
  GOVD_LOCAL = 128,
  GOVD_DEBUG_PRIVATE = 256,
  GOVD_PRIVATE_OUTER_REF = 512,
  /* Mask selecting only the data-sharing classification bits.  */
  GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
			   | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LOCAL)
};
/* Kind of OpenMP region a gimplify_omp_ctx describes.  The values are
   chosen so that combined/untied variants share the base construct's
   bit (3 = 2|1, 5 = 4|1); code below tests `region_type & ORT_TASK'.  */
enum omp_region_type
{
  ORT_WORKSHARE = 0,
  ORT_PARALLEL = 2,
  ORT_COMBINED_PARALLEL = 3,
  ORT_TASK = 4,
  ORT_UNTIED_TASK = 5
};
/* Per-OpenMP-construct gimplification state; contexts nest through
   outer_context.  */
struct gimplify_omp_ctx
{
  struct gimplify_omp_ctx *outer_context;	/* Enclosing OMP context, or NULL.  */
  splay_tree variables;				/* Maps decl -> gimplify_omp_var_data flags.  */
  struct pointer_set_t *privatized_types;	/* Types already privatized in this region.  */
  location_t location;				/* Source location of the construct.  */
  enum omp_clause_default_kind default_kind;	/* default(...) clause in effect.  */
  enum omp_region_type region_type;		/* Which construct this context is for.  */
};
/* Innermost gimplification context (statement-level state), or NULL.  */
static struct gimplify_ctx *gimplify_ctxp;
/* Innermost OpenMP context, or NULL outside of OMP constructs.  */
static struct gimplify_omp_ctx *gimplify_omp_ctxp;
/* Formal (expression) temporary table handling: Multiple occurrences of
   the same scalar expression are evaluated into the same temporary.  */
typedef struct gimple_temp_hash_elt
{
  tree val;   /* Key */
  tree temp;  /* Value */
} elt_t;
/* Forward declarations.  */
static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool);
/* Mark X addressable.  Unlike the langhook we expect X to be in gimple
   form and we don't do any syntax checking.  */
static void
mark_addressable (tree x)
{
  /* Strip component references (field/array accesses) down to the base
     object; the addressable bit belongs on the base decl.  */
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);
  /* Only variables and parameters carry TREE_ADDRESSABLE meaningfully
     here; anything else is left alone.  */
  if (TREE_CODE (x) != VAR_DECL && TREE_CODE (x) != PARM_DECL)
    return ;
  TREE_ADDRESSABLE (x) = 1;
}
/* Return a hash value for a formal temporary table entry.  Must remain
   consistent with gimple_tree_eq (equal entries hash equal).  */
static hashval_t
gimple_tree_hash (const void *p)
{
  tree t = ((const elt_t *) p)->val;
  return iterative_hash_expr (t, 0);
}
/* Compare two formal temporary table entries for equality.  Returns
   nonzero iff both entries wrap structurally identical expressions of
   the same code and type.  */
static int
gimple_tree_eq (const void *p1, const void *p2)
{
  tree a = ((const elt_t *) p1)->val;
  tree b = ((const elt_t *) p2)->val;

  /* Cheap rejects first: differing tree codes or types can never
     compare equal.  */
  if (TREE_CODE (a) != TREE_CODE (b))
    return 0;
  if (TREE_TYPE (a) != TREE_TYPE (b))
    return 0;

  if (!operand_equal_p (a, b, 0))
    return 0;

  /* Only allow them to compare equal if they also hash equal; otherwise
     results are nondeterminate, and we fail bootstrap comparison.  */
  gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2));

  return 1;
}
/* Link gimple statement GS to the end of the sequence *SEQ_P.  If
   *SEQ_P is NULL, a new sequence is allocated.  This function is
   similar to gimple_seq_add_stmt, but does not scan the operands.
   During gimplification, we need to manipulate statement sequences
   before the def/use vectors have been constructed.  */
static void
gimplify_seq_add_stmt (gimple_seq *seq_p, gimple gs)
{
  gimple_stmt_iterator si;
  /* A NULL statement is a no-op.  */
  if (gs == NULL)
    return;
  /* Lazily allocate the destination sequence.  */
  if (*seq_p == NULL)
    *seq_p = gimple_seq_alloc ();
  si = gsi_last (*seq_p);
  gsi_insert_after_without_update (&si, gs, GSI_NEW_STMT);
}
/* Append sequence SRC to the end of sequence *DST_P.  If *DST_P is
   NULL, a new sequence is allocated.  This function is
   similar to gimple_seq_add_seq, but does not scan the operands.
   During gimplification, we need to manipulate statement sequences
   before the def/use vectors have been constructed.  */
static void
gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
{
  gimple_stmt_iterator si;
  /* Appending an empty sequence is a no-op.  */
  if (src == NULL)
    return;
  if (*dst_p == NULL)
    *dst_p = gimple_seq_alloc ();
  si = gsi_last (*dst_p);
  gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT);
}
/* Set up a context for the gimplifier.  C is caller-provided storage;
   it is zeroed here and pushed onto the context stack.  */
void
push_gimplify_context (struct gimplify_ctx *c)
{
  memset (c, '\0', sizeof (*c));
  c->prev_context = gimplify_ctxp;
  gimplify_ctxp = c;
}
/* Tear down a context for the gimplifier.  If BODY is non-null, then
   put the temporaries into the outer BIND_EXPR.  Otherwise, put them
   in the local_decls.
   BODY is not a sequence, but the first tuple in a sequence.  */
void
pop_gimplify_context (gimple body)
{
  struct gimplify_ctx *c = gimplify_ctxp;
  tree t;
  /* The bind-expr stack must be empty by the time the context ends.  */
  gcc_assert (c && (c->bind_expr_stack == NULL
		    || VEC_empty (gimple, c->bind_expr_stack)));
  VEC_free (gimple, heap, c->bind_expr_stack);
  gimplify_ctxp = c->prev_context;
  /* Temporaries stop being formal temps once the context closes.  */
  for (t = c->temps; t ; t = TREE_CHAIN (t))
    DECL_GIMPLE_FORMAL_TEMP_P (t) = 0;
  if (body)
    declare_vars (c->temps, body, false);
  else
    record_vars (c->temps);
  /* Release the formal-temporary hash table, if one was created.  */
  if (c->temp_htab)
    htab_delete (c->temp_htab);
}
/* Push GIMPLE_BIND onto the current context's stack of open binds.  */
static void
gimple_push_bind_expr (gimple gimple_bind)
{
  if (gimplify_ctxp->bind_expr_stack == NULL)
    gimplify_ctxp->bind_expr_stack = VEC_alloc (gimple, heap, 8);
  VEC_safe_push (gimple, heap, gimplify_ctxp->bind_expr_stack, gimple_bind);
}
/* Pop the innermost GIMPLE_BIND from the stack.  */
static void
gimple_pop_bind_expr (void)
{
  VEC_pop (gimple, gimplify_ctxp->bind_expr_stack);
}
/* Return the innermost currently-open GIMPLE_BIND.  */
gimple
gimple_current_bind_expr (void)
{
  return VEC_last (gimple, gimplify_ctxp->bind_expr_stack);
}
/* Return the stack GIMPLE_BINDs created during gimplification.  */
VEC(gimple, heap) *
gimple_bind_expr_stack (void)
{
  return gimplify_ctxp->bind_expr_stack;
}
/* Returns true iff there is a COND_EXPR between us and the innermost
   CLEANUP_POINT_EXPR.  This info is used by gimple_push_cleanup.  */
static bool
gimple_conditional_context (void)
{
  return gimplify_ctxp->conditions > 0;
}
/* Note that we've entered a COND_EXPR.  */
static void
gimple_push_condition (void)
{
#ifdef ENABLE_GIMPLE_CHECKING
  /* Entering the outermost condition: no conditional cleanups may be
     pending from a previous condition.  */
  if (gimplify_ctxp->conditions == 0)
    gcc_assert (gimple_seq_empty_p (gimplify_ctxp->conditional_cleanups));
#endif
  ++(gimplify_ctxp->conditions);
}
/* Note that we've left a COND_EXPR.  If we're back at unconditional scope
   now, add any conditional cleanups we've seen to the prequeue.  */
static void
gimple_pop_condition (gimple_seq *pre_p)
{
  int conds = --(gimplify_ctxp->conditions);
  gcc_assert (conds >= 0);
  if (conds == 0)
    {
      /* Flush cleanups collected while inside conditions.  */
      gimplify_seq_add_seq (pre_p, gimplify_ctxp->conditional_cleanups);
      gimplify_ctxp->conditional_cleanups = NULL;
    }
}
/* A stable comparison routine for use with splay trees and DECLs.
   Orders decls by their DECL_UID.  Returns negative, zero, or positive
   as XA's UID is less than, equal to, or greater than XB's.  */
static int
splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
{
  tree a = (tree) xa;
  tree b = (tree) xb;
  unsigned int ua = DECL_UID (a);
  unsigned int ub = DECL_UID (b);

  /* Compare rather than subtract: subtracting two UIDs can overflow a
     signed int (undefined behavior) and then mis-order the tree.  The
     (x > y) - (x < y) idiom yields -1/0/+1 without any overflow.  */
  return (ua > ub) - (ua < ub);
}
/* Create a new omp construct that deals with variable remapping.
   The new context nests inside the current one and becomes current
   only when the caller assigns it to gimplify_omp_ctxp.  */
static struct gimplify_omp_ctx *
new_omp_context (enum omp_region_type region_type)
{
  struct gimplify_omp_ctx *c;
  c = XCNEW (struct gimplify_omp_ctx);
  c->outer_context = gimplify_omp_ctxp;
  c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
  c->privatized_types = pointer_set_create ();
  c->location = input_location;
  c->region_type = region_type;
  /* Tasks default to unspecified sharing; everything else to shared.  */
  if ((region_type & ORT_TASK) == 0)
    c->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  else
    c->default_kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  return c;
}
/* Destroy an omp construct that deals with variable remapping.  */
static void
delete_omp_context (struct gimplify_omp_ctx *c)
{
  splay_tree_delete (c->variables);
  pointer_set_destroy (c->privatized_types);
  XDELETE (c);
}
static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int);
static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool);
/* A subroutine of append_to_statement_list{,_force}.  T is not NULL.  */
static void
append_to_statement_list_1 (tree t, tree *list_p)
{
  tree list = *list_p;
  tree_stmt_iterator i;
  if (!list)
    {
      /* If T is itself a statement list, adopt it directly rather than
	 wrapping it in a new list.  */
      if (t && TREE_CODE (t) == STATEMENT_LIST)
	{
	  *list_p = t;
	  return;
	}
      *list_p = list = alloc_stmt_list ();
    }
  i = tsi_last (list);
  tsi_link_after (&i, t, TSI_CONTINUE_LINKING);
}
/* Add T to the end of the list container pointed to by LIST_P.
   If T is an expression with no effects, it is ignored.  */
void
append_to_statement_list (tree t, tree *list_p)
{
  if (t && TREE_SIDE_EFFECTS (t))
    append_to_statement_list_1 (t, list_p);
}
/* Similar, but the statement is always added, regardless of side effects.  */
void
append_to_statement_list_force (tree t, tree *list_p)
{
  if (t != NULL_TREE)
    append_to_statement_list_1 (t, list_p);
}
/* Both gimplify the statement T and append it to *SEQ_P.  This function
   behaves exactly as gimplify_stmt, but you don't have to pass T as a
   reference.  */
void
gimplify_and_add (tree t, gimple_seq *seq_p)
{
  gimplify_stmt (&t, seq_p);
}
/* Gimplify statement T into sequence *SEQ_P, and return the first
   tuple in the sequence of generated tuples for this statement.
   Return NULL if gimplifying T produced no tuples.  */
static gimple
gimplify_and_return_first (tree t, gimple_seq *seq_p)
{
  /* Remember where the sequence ended before gimplification so we can
     find the first newly-added statement afterwards.  */
  gimple_stmt_iterator last = gsi_last (*seq_p);
  gimplify_and_add (t, seq_p);
  if (!gsi_end_p (last))
    {
      gsi_next (&last);
      return gsi_stmt (last);
    }
  else
    /* The sequence was empty before; its first statement is ours.  */
    return gimple_seq_first_stmt (*seq_p);
}
/* Strip off a legitimate source ending from the input string NAME of
length LEN. Rather than having to know the names used by all of
our front ends, we strip off an ending of a period followed by
up to five characters. (Java uses ".class".) */
static inline void
remove_suffix (char *name, int len)
{
int i;
for (i = 2; i < 8 && len > i; i++)
{
if (name[len - i] == '.')
{
name[len - i] = '\0';
break;
}
}
}
/* Subroutine for find_single_pointer_decl.  Walk-tree callback that
   records a pointer-typed DECL in *DATA, aborting the walk (by
   returning non-NULL) on references or on a second pointer decl.  */
static tree
find_single_pointer_decl_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
			    void *data)
{
  tree *pdecl = (tree *) data;
  /* We are only looking for pointers at the same level as the
     original tree; we must not look through any indirections.
     Returning anything other than NULL_TREE will cause the caller to
     not find a base.  */
  if (REFERENCE_CLASS_P (*tp))
    return *tp;
  if (DECL_P (*tp) && POINTER_TYPE_P (TREE_TYPE (*tp)))
    {
      if (*pdecl)
	{
	  /* We already found a pointer decl; return anything other
	     than NULL_TREE to unwind from walk_tree signalling that
	     we have a duplicate.  */
	  return *tp;
	}
      *pdecl = *tp;
    }
  return NULL_TREE;
}
/* Find the single DECL of pointer type in the tree T, used directly
   rather than via an indirection, and return it.  If there are zero
   or more than one such DECLs, return NULL.  */
static tree
find_single_pointer_decl (tree t)
{
  tree decl = NULL_TREE;
  if (walk_tree (&t, find_single_pointer_decl_1, &decl, NULL))
    {
      /* find_single_pointer_decl_1 returns a nonzero value, causing
	 walk_tree to return a nonzero value, to indicate that it
	 found more than one pointer DECL or that it found an
	 indirection.  */
      return NULL_TREE;
    }
  return decl;
}
/* Create a new temporary name with PREFIX.  Returns an identifier.  */
/* Monotonic counter making each generated temporary name unique;
   GTY(()) so it survives garbage collection / PCH.  */
static GTY(()) unsigned int tmp_var_id_num;
tree
create_tmp_var_name (const char *prefix)
{
  char *tmp_name;
  if (prefix)
    {
      /* Work on a scratch copy so the caller's string is untouched;
	 drop any trailing ".suffix" (e.g. ".class") from it.  */
      char *preftmp = ASTRDUP (prefix);
      remove_suffix (preftmp, strlen (preftmp));
      prefix = preftmp;
    }
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++);
  return get_identifier (tmp_name);
}
/* Create a new temporary variable declaration of type TYPE.
   Does NOT push it into the current binding.  */
tree
create_tmp_var_raw (tree type, const char *prefix)
{
  tree tmp_var;
  tree new_type;
  /* Make the type of the variable writable.  */
  new_type = build_type_variant (type, 0, 0);
  TYPE_ATTRIBUTES (new_type) = TYPE_ATTRIBUTES (type);
  /* NOTE(review): the decl below is built with TYPE, not NEW_TYPE, so
     the writable variant computed above appears unused here -- confirm
     whether this is intentional.  */
  tmp_var = build_decl (VAR_DECL, prefix ? create_tmp_var_name (prefix) : NULL,
			type);
  /* The variable was declared by the compiler.  */
  DECL_ARTIFICIAL (tmp_var) = 1;
  /* And we don't want debug info for it.  */
  DECL_IGNORED_P (tmp_var) = 1;
  /* Make the variable writable.  */
  TREE_READONLY (tmp_var) = 0;
  DECL_EXTERNAL (tmp_var) = 0;
  TREE_STATIC (tmp_var) = 0;
  TREE_USED (tmp_var) = 1;
  return tmp_var;
}
/* Create a new temporary variable declaration of type TYPE.  DOES push the
   variable into the current binding.  Further, assume that this is called
   only from gimplification or optimization, at which point the creation of
   certain types are bugs.  */
tree
create_tmp_var (tree type, const char *prefix)
{
  tree tmp_var;
  /* We don't allow types that are addressable (meaning we can't make copies),
     or incomplete.  We also used to reject every variable size objects here,
     but now support those for which a constant upper bound can be obtained.
     The processing for variable sizes is performed in gimple_add_tmp_var,
     point at which it really matters and possibly reached via paths not going
     through this function, e.g. after direct calls to create_tmp_var_raw.  */
  gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type));
  tmp_var = create_tmp_var_raw (type, prefix);
  gimple_add_tmp_var (tmp_var);
  return tmp_var;
}
/* Create a temporary with a name derived from VAL.  Subroutine of
   lookup_tmp_var; nobody else should call this function.  */
static inline tree
create_tmp_from_val (tree val)
{
  return create_tmp_var (TREE_TYPE (val), get_name (val));
}
/* Create a temporary to hold the value of VAL.  If IS_FORMAL, try to reuse
   an existing expression temporary.  */
static tree
lookup_tmp_var (tree val, bool is_formal)
{
  tree ret;
  /* If not optimizing, never really reuse a temporary.  local-alloc
     won't allocate any variable that is used in more than one basic
     block, which means it will go into memory, causing much extra
     work in reload and final and poorer code generation, outweighing
     the extra memory allocation here.  */
  if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
    ret = create_tmp_from_val (val);
  else
    {
      elt_t elt, *elt_p;
      void **slot;
      elt.val = val;
      /* Lazily create the expression -> temporary hash table.  */
      if (gimplify_ctxp->temp_htab == NULL)
	gimplify_ctxp->temp_htab
	  = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free);
      slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT);
      if (*slot == NULL)
	{
	  /* First occurrence of this expression: make a new temp.  */
	  elt_p = XNEW (elt_t);
	  elt_p->val = val;
	  elt_p->temp = ret = create_tmp_from_val (val);
	  *slot = (void *) elt_p;
	}
      else
	{
	  /* Seen before: reuse the recorded temporary.  */
	  elt_p = (elt_t *) *slot;
	  ret = elt_p->temp;
	}
    }
  if (is_formal)
    DECL_GIMPLE_FORMAL_TEMP_P (ret) = 1;
  return ret;
}
/* Return true if T is a CALL_EXPR or an expression that can be
   assigned to a temporary.  Note that this predicate should only be
   used during gimplification.  See the rationale for this in
   gimplify_modify_expr.  */
static bool
is_gimple_formal_tmp_or_call_rhs (tree t)
{
  if (TREE_CODE (t) == CALL_EXPR)
    return true;
  return is_gimple_formal_tmp_rhs (t);
}
/* Returns true iff T is a valid RHS for an assignment to a renamed
   user -- or front-end generated artificial -- variable.  */
static bool
is_gimple_reg_or_call_rhs (tree t)
{
  /* If the RHS of the MODIFY_EXPR may throw or make a nonlocal goto
     and the LHS is a user variable, then we need to introduce a formal
     temporary.  This way the optimizers can determine that the user
     variable is only modified if evaluation of the RHS does not throw.
     Don't force a temp of a non-renamable type; the copy could be
     arbitrarily expensive.  Instead we will generate a VDEF for
     the assignment.  */
  if (is_gimple_reg_type (TREE_TYPE (t)))
    {
      if (TREE_CODE (t) == CALL_EXPR && TREE_SIDE_EFFECTS (t))
	return false;
      if (tree_could_throw_p (t))
	return false;
    }
  return is_gimple_formal_tmp_or_call_rhs (t);
}
/* Return true if T is a valid memory RHS or a CALL_EXPR.  Note that
   this predicate should only be used during gimplification.  See the
   rationale for this in gimplify_modify_expr.  */
static bool
is_gimple_mem_or_call_rhs (tree t)
{
  /* If we're dealing with a renamable type, either source or dest must be
     a renamed variable.  */
  return is_gimple_reg_type (TREE_TYPE (t))
	 ? is_gimple_val (t)
	 : is_gimple_formal_tmp_or_call_rhs (t);
}
/* Returns a formal temporary variable initialized with VAL.  PRE_P is as
   in gimplify_expr.  Only use this function if:
   1) The value of the unfactored expression represented by VAL will not
   change between the initialization and use of the temporary, and
   2) The temporary will not be otherwise modified.
   For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
   and #2 means it is inappropriate for && temps.
   For other cases, use get_initialized_tmp_var instead.  */
static tree
internal_get_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p,
		      bool is_formal)
{
  tree t, mod;
  /* Notice that we explicitly allow VAL to be a CALL_EXPR so that we
     can create an INIT_EXPR and convert it into a GIMPLE_CALL below.  */
  gimplify_expr (&val, pre_p, post_p, is_gimple_formal_tmp_or_call_rhs,
		 fb_rvalue);
  t = lookup_tmp_var (val, is_formal);
  if (is_formal)
    {
      /* Propagate restrict-pointer base information from VAL's single
	 pointer decl (if any) onto the temporary.  */
      tree u = find_single_pointer_decl (val);
      if (u && TREE_CODE (u) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (u))
	u = DECL_GET_RESTRICT_BASE (u);
      if (u && TYPE_RESTRICT (TREE_TYPE (u)))
	{
	  if (DECL_BASED_ON_RESTRICT_P (t))
	    gcc_assert (u == DECL_GET_RESTRICT_BASE (t));
	  else
	    {
	      DECL_BASED_ON_RESTRICT_P (t) = 1;
	      SET_DECL_RESTRICT_BASE (t, u);
	    }
	}
    }
  /* Complex and vector temporaries can live in pseudo registers.  */
  if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
      || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
    DECL_GIMPLE_REG_P (t) = 1;
  mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val));
  /* Give the initialization VAL's location, or fall back to the
     current input location.  */
  if (EXPR_HAS_LOCATION (val))
    SET_EXPR_LOCUS (mod, EXPR_LOCUS (val));
  else
    SET_EXPR_LOCATION (mod, input_location);
  /* gimplify_modify_expr might want to reduce this further.  */
  gimplify_and_add (mod, pre_p);
  ggc_free (mod);
  /* If we're gimplifying into ssa, gimplify_modify_expr will have
     given our temporary an SSA name.  Find and return it.  */
  if (gimplify_ctxp->into_ssa)
    {
      gimple last = gimple_seq_last_stmt (*pre_p);
      t = gimple_get_lhs (last);
    }
  return t;
}
/* Returns a formal temporary variable initialized with VAL.  PRE_P
   points to a sequence where side-effects needed to compute VAL should be
   stored.  */
tree
get_formal_tmp_var (tree val, gimple_seq *pre_p)
{
  return internal_get_tmp_var (val, pre_p, NULL, true);
}
/* Returns a temporary variable initialized with VAL.  PRE_P and POST_P
   are as in gimplify_expr.  */
tree
get_initialized_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p)
{
  return internal_get_tmp_var (val, pre_p, post_p, false);
}
/* Declares all the variables in VARS in SCOPE.  If DEBUG_INFO is
   true, generate debug info for them; otherwise don't.  */
void
declare_vars (tree vars, gimple scope, bool debug_info)
{
  tree last = vars;
  if (last)
    {
      tree temps, block;
      gcc_assert (gimple_code (scope) == GIMPLE_BIND);
      /* VARS arrives in reverse declaration order; flip it.  After
	 nreverse, LAST still points at what is now the final node.  */
      temps = nreverse (last);
      block = gimple_bind_block (scope);
      gcc_assert (!block || TREE_CODE (block) == BLOCK);
      if (!block || !debug_info)
	{
	  /* Prepend the temps to the bind's variable chain.  */
	  TREE_CHAIN (last) = gimple_bind_vars (scope);
	  gimple_bind_set_vars (scope, temps);
	}
      else
	{
	  /* We need to attach the nodes both to the BIND_EXPR and to its
	     associated BLOCK for debugging purposes.  The key point here
	     is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
	     is a subchain of the BIND_EXPR_VARS of the BIND_EXPR.  */
	  if (BLOCK_VARS (block))
	    BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
	  else
	    {
	      gimple_bind_set_vars (scope,
				    chainon (gimple_bind_vars (scope), temps));
	      BLOCK_VARS (block) = temps;
	    }
	}
    }
}
/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
   for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly.  Abort if
   no such upper bound can be obtained.  */
static void
force_constant_size (tree var)
{
  /* The only attempt we make is by querying the maximum size of objects
     of the variable's type.  */
  HOST_WIDE_INT max_size;
  gcc_assert (TREE_CODE (var) == VAR_DECL);
  max_size = max_int_size_in_bytes (TREE_TYPE (var));
  /* A negative result means no constant bound exists -- a hard error.  */
  gcc_assert (max_size >= 0);
  DECL_SIZE_UNIT (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
  DECL_SIZE (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}
/* Record temporary TMP with the current function, choosing the right
   home for it depending on whether we are inside the gimplifier, have
   a CFUN, or are handling a nested function body.  */
void
gimple_add_tmp_var (tree tmp)
{
  /* TMP must be fresh: not yet chained anywhere nor seen in a bind.  */
  gcc_assert (!TREE_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));
  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!host_integerp (DECL_SIZE_UNIT (tmp), 1))
    force_constant_size (tmp);
  DECL_CONTEXT (tmp) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;
  if (gimplify_ctxp)
    {
      /* Chain onto the gimplifier's temp list; pop_gimplify_context
	 will declare these later.  */
      TREE_CHAIN (tmp) = gimplify_ctxp->temps;
      gimplify_ctxp->temps = tmp;
      /* Mark temporaries local within the nearest enclosing parallel.  */
      if (gimplify_omp_ctxp)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  while (ctx && ctx->region_type == ORT_WORKSHARE)
	    ctx = ctx->outer_context;
	  if (ctx)
	    omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN);
	}
    }
  else if (cfun)
    record_vars (tmp);
  else
    {
      gimple_seq body_seq;
      /* This case is for nested functions.  We need to expose the locals
	 they create.  */
      body_seq = gimple_body (current_function_decl);
      declare_vars (tmp, gimple_seq_first_stmt (body_seq), false);
    }
}
/* Determines whether to assign a location to the statement GS.
   Labels never get one: a line note there would not correspond to the
   start of the loop/switch (notably for break labels).  */
static bool
should_carry_location_p (gimple gs)
{
  return gimple_code (gs) != GIMPLE_LABEL;
}
/* Same, but for a tree.  Labels are skipped for the reason above, and
   empty statements are never annotated since that confuses gcov.  */
static bool
tree_should_carry_location_p (const_tree stmt)
{
  if (TREE_CODE (stmt) == LABEL_EXPR)
    return false;
  return TREE_SIDE_EFFECTS (stmt) != 0;
}
/* Return true if a location should not be emitted for this statement
   by annotate_one_with_location.  The flag lives in pass-local flag
   GF_PLF_1 of the statement.  */
static inline bool
gimple_do_not_emit_location_p (gimple g)
{
  return gimple_plf (g, GF_PLF_1);
}
/* Mark statement G so a location will not be emitted by
   annotate_one_with_location.  */
static inline void
gimple_set_do_not_emit_location (gimple g)
{
  /* The PLF flags are initialized to 0 when a new tuple is created,
     so no need to initialize it anywhere.  */
  gimple_set_plf (g, GF_PLF_1, true);
}
/* Set the location for gimple statement GS to LOCATION, unless it
   already has one, was marked do-not-emit, or is a label.  */
static void
annotate_one_with_location (gimple gs, location_t location)
{
  if (!gimple_has_location (gs)
      && !gimple_do_not_emit_location_p (gs)
      && should_carry_location_p (gs))
    gimple_set_location (gs, location);
}
/* Same, but for tree T.  */
static void
tree_annotate_one_with_location (tree t, location_t location)
{
  if (CAN_HAVE_LOCATION_P (t)
      && ! EXPR_HAS_LOCATION (t) && tree_should_carry_location_p (t))
    SET_EXPR_LOCATION (t, location);
}
/* Set LOCATION for all the statements after iterator GSI in sequence
   SEQ.  If GSI is pointing to the end of the sequence, start with the
   first statement in SEQ.  */
static void
annotate_all_with_location_after (gimple_seq seq, gimple_stmt_iterator gsi,
				  location_t location)
{
  if (gsi_end_p (gsi))
    gsi = gsi_start (seq);
  else
    gsi_next (&gsi);
  for (; !gsi_end_p (gsi); gsi_next (&gsi))
    annotate_one_with_location (gsi_stmt (gsi), location);
}
/* Set the location for all the statements in a sequence STMT_P to LOCATION.  */
void
annotate_all_with_location (gimple_seq stmt_p, location_t location)
{
  gimple_stmt_iterator i;
  if (gimple_seq_empty_p (stmt_p))
    return;
  for (i = gsi_start (stmt_p); !gsi_end_p (i); gsi_next (&i))
    {
      gimple gs = gsi_stmt (i);
      annotate_one_with_location (gs, location);
    }
}
/* Same, but for statement or statement list in *STMT_P.  */
void
tree_annotate_all_with_location (tree *stmt_p, location_t location)
{
  tree_stmt_iterator i;
  if (!*stmt_p)
    return;
  for (i = tsi_start (*stmt_p); !tsi_end_p (i); tsi_next (&i))
    {
      tree t = tsi_stmt (i);
      /* Assuming we've already been gimplified, we shouldn't
	 see nested chaining constructs anymore.  */
      gcc_assert (TREE_CODE (t) != STATEMENT_LIST
		  && TREE_CODE (t) != COMPOUND_EXPR);
      tree_annotate_one_with_location (t, location);
    }
}
/* Similar to copy_tree_r() but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that should only be done once.  If we
   were to unshare something like SAVE_EXPR(i++), the gimplification
   process would create wrong code.  */
static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);
  /* Don't unshare types, decls, constants and SAVE_EXPR nodes.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant
      || code == SAVE_EXPR || code == TARGET_EXPR
      /* We can't do anything sensible with a BLOCK used as an expression,
	 but we also can't just die when we see it because of non-expression
	 uses.  So just avert our eyes and cross our fingers.  Silly Java.  */
      || code == BLOCK)
    *walk_subtrees = 0;
  else
    {
      /* BIND_EXPRs must have been lowered before unsharing runs.  */
      gcc_assert (code != BIND_EXPR);
      copy_tree_r (tp, walk_subtrees, data);
    }
  return NULL_TREE;
}
/* Callback for walk_tree to unshare most of the shared trees rooted at
   *TP.  If *TP has been visited already (i.e., TREE_VISITED (*TP) == 1),
   then *TP is deep copied by calling copy_tree_r.
   This unshares the same trees as copy_tree_r with the exception of
   SAVE_EXPR nodes.  These nodes model computations that should only be
   done once.  If we were to unshare something like SAVE_EXPR(i++), the
   gimplification process would create wrong code.  */
static tree
copy_if_shared_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);
  /* Skip types, decls, and constants.  But we do want to look at their
     types and the bounds of types.  Mark them as visited so we properly
     unmark their subtrees on the unmark pass.  If we've already seen them,
     don't look down further.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      if (TREE_VISITED (t))
	*walk_subtrees = 0;
      else
	TREE_VISITED (t) = 1;
    }
  /* If this node has been visited already, unshare it and don't look
     any deeper.  */
  else if (TREE_VISITED (t))
    {
      walk_tree (tp, mostly_copy_tree_r, NULL, NULL);
      *walk_subtrees = 0;
    }
  /* Otherwise, mark the tree as visited and keep looking.  */
  else
    TREE_VISITED (t) = 1;
  return NULL_TREE;
}
/* Walk-tree callback that clears TREE_VISITED; stops descending once an
   unvisited node is reached (its subtree was never marked).  */
static tree
unmark_visited_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data ATTRIBUTE_UNUSED)
{
  if (TREE_VISITED (*tp))
    TREE_VISITED (*tp) = 0;
  else
    *walk_subtrees = 0;
  return NULL_TREE;
}
/* Unshare all the trees in BODY_P, a pointer into the body of FNDECL, and the
bodies of any nested functions if we are unsharing the entire body of
FNDECL. */
static void
unshare_body (tree *body_p, tree fndecl)
{
struct cgraph_node *cgn = cgraph_node (fndecl);
walk_tree (body_p, copy_if_shared_r, NULL, NULL);
if (body_p == &DECL_SAVED_TREE (fndecl))
for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
unshare_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}
/* Counterpart of unshare_body: clear the TREE_VISITED marks left on the
   trees in BODY_P, recursing into nested functions when BODY_P is the
   whole saved body of FNDECL.  */
static void
unvisit_body (tree *body_p, tree fndecl)
{
  struct cgraph_node *node = cgraph_node (fndecl);
  struct cgraph_node *child;

  walk_tree (body_p, unmark_visited_r, NULL, NULL);

  if (body_p != &DECL_SAVED_TREE (fndecl))
    return;
  for (child = node->nested; child; child = child->next_nested)
    unvisit_body (&DECL_SAVED_TREE (child->decl), child->decl);
}
/* Return an unconditionally unshared copy of EXPR.  Needed for stored
   expressions that span multiple functions (e.g. BINFO_VTABLE), where the
   normal visited-bit unsharing cannot detect the sharing.  */
tree
unshare_expr (tree expr)
{
  tree result = expr;

  walk_tree (&result, mostly_copy_tree_r, NULL, NULL);
  return result;
}
/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Returns the temporary, or NULL_TREE if
   WRAPPER was already void.

   TEMP, if non-null, is an INIT_EXPR/MODIFY_EXPR whose RHS is WRAPPER;
   the assignment is pushed down onto WRAPPER's value expression.  */
tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;
      /* Set p to point to the body of the wrapper.  Loop until we find
         something that isn't a wrapper.  Every wrapper passed through is
         voided and marked as having side effects on the way down.  */
      for (p = &wrapper; p && *p; )
        {
          switch (TREE_CODE (*p))
            {
            case BIND_EXPR:
              TREE_SIDE_EFFECTS (*p) = 1;
              TREE_TYPE (*p) = void_type_node;
              /* For a BIND_EXPR, the body is operand 1.  */
              p = &BIND_EXPR_BODY (*p);
              break;
            case CLEANUP_POINT_EXPR:
            case TRY_FINALLY_EXPR:
            case TRY_CATCH_EXPR:
              TREE_SIDE_EFFECTS (*p) = 1;
              TREE_TYPE (*p) = void_type_node;
              p = &TREE_OPERAND (*p, 0);
              break;
            case STATEMENT_LIST:
              {
                tree_stmt_iterator i = tsi_last (*p);
                TREE_SIDE_EFFECTS (*p) = 1;
                TREE_TYPE (*p) = void_type_node;
                /* The value of a STATEMENT_LIST is its last statement;
                   an empty list has no value expression at all.  */
                p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
              }
              break;
            case COMPOUND_EXPR:
              /* Advance to the last statement.  Set all container types to void.  */
              for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1))
                {
                  TREE_SIDE_EFFECTS (*p) = 1;
                  TREE_TYPE (*p) = void_type_node;
                }
              break;
            default:
              goto out;
            }
        }
    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
        temp = NULL_TREE;
      else if (temp)
        {
          /* The wrapper is on the RHS of an assignment that we're pushing
             down.  */
          gcc_assert (TREE_CODE (temp) == INIT_EXPR
                      || TREE_CODE (temp) == MODIFY_EXPR);
          TREE_OPERAND (temp, 1) = *p;
          *p = temp;
        }
      else
        {
          /* No assignment supplied: capture the value in a fresh temporary
             so the wrapper itself can become void.  */
          temp = create_tmp_var (type, "retval");
          *p = build2 (INIT_EXPR, type, temp, *p);
        }
      return temp;
    }
  return NULL_TREE;
}
/* Build the pair of calls that bracket dynamic stack allocation:
   *SAVE receives a call to __builtin_stack_save whose result is stored
   in a fresh temporary, and *RESTORE receives the matching call to
   __builtin_stack_restore taking that temporary.  */
static void
build_stack_save_restore (gimple *save, gimple *restore)
{
  tree saved_ptr = create_tmp_var (ptr_type_node, "saved_stack");

  *save = gimple_build_call (implicit_built_in_decls[BUILT_IN_STACK_SAVE], 0);
  gimple_call_set_lhs (*save, saved_ptr);
  *restore
    = gimple_build_call (implicit_built_in_decls[BUILT_IN_STACK_RESTORE],
                         1, saved_ptr);
}
/* Gimplify a BIND_EXPR.  Just voidify and recurse.  The result is a
   GIMPLE_BIND appended to PRE_P; *EXPR_P becomes the value temporary
   (if the BIND_EXPR had a value) or NULL_TREE.  */
static enum gimplify_status
gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  gimple gimple_bind;
  gimple_seq body;
  tree temp = voidify_wrapper_expr (bind_expr, NULL);
  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (t) == VAR_DECL)
        {
          struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
          /* Mark variable as local.  */
          if (ctx && !is_global_var (t)
              && (! DECL_SEEN_IN_BIND_EXPR_P (t)
                  || splay_tree_lookup (ctx->variables,
                                        (splay_tree_key) t) == NULL))
            omp_add_variable (gimplify_omp_ctxp, t, GOVD_LOCAL | GOVD_SEEN);
          DECL_SEEN_IN_BIND_EXPR_P (t) = 1;
          if (DECL_HARD_REGISTER (t) && !is_global_var (t) && cfun)
            cfun->has_local_explicit_reg_vars = true;
        }
      /* Preliminarily mark non-addressed complex variables as eligible
         for promotion to gimple registers.  We'll transform their uses
         as we find them.  */
      if ((TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
           || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
          && !TREE_THIS_VOLATILE (t)
          && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t))
          && !needs_to_live_in_memory (t))
        DECL_GIMPLE_REG_P (t) = 1;
    }
  gimple_bind = gimple_build_bind (BIND_EXPR_VARS (bind_expr), NULL,
                                   BIND_EXPR_BLOCK (bind_expr));
  gimple_push_bind_expr (gimple_bind);
  /* Cleared here so we can detect whether anything in the body (e.g. a
     VLA declaration) requested a stack save/restore for this level.  */
  gimplify_ctxp->save_stack = false;
  /* Gimplify the body into the GIMPLE_BIND tuple's body.  */
  body = NULL;
  gimplify_stmt (&BIND_EXPR_BODY (bind_expr), &body);
  gimple_bind_set_body (gimple_bind, body);
  if (gimplify_ctxp->save_stack)
    {
      gimple stack_save, stack_restore, gs;
      gimple_seq cleanup, new_body;
      /* Save stack on entry and restore it on exit.  Add a try_finally
         block to achieve this.  Note that mudflap depends on the
         format of the emitted code: see mx_register_decls().  */
      build_stack_save_restore (&stack_save, &stack_restore);
      cleanup = new_body = NULL;
      gimplify_seq_add_stmt (&cleanup, stack_restore);
      gs = gimple_build_try (gimple_bind_body (gimple_bind), cleanup,
                             GIMPLE_TRY_FINALLY);
      gimplify_seq_add_stmt (&new_body, stack_save);
      gimplify_seq_add_stmt (&new_body, gs);
      gimple_bind_set_body (gimple_bind, new_body);
    }
  gimplify_ctxp->save_stack = old_save_stack;
  gimple_pop_bind_expr ();
  gimplify_seq_add_stmt (pre_p, gimple_bind);
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }
  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}
/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.
   PRE_P points to the sequence where side effects that must happen before
   STMT should be stored.  */
static enum gimplify_status
gimplify_return_expr (tree stmt, gimple_seq *pre_p)
{
  gimple ret;
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;
  if (ret_expr == error_mark_node)
    return GS_ERROR;
  /* A bare return, or one whose operand is already the RESULT_DECL,
     needs no rewriting.  (The error_mark_node case was handled above,
     so it need not be re-tested here.)  */
  if (!ret_expr
      || TREE_CODE (ret_expr) == RESULT_DECL)
    {
      ret = gimple_build_return (ret_expr);
      gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
      gimplify_seq_add_stmt (pre_p, ret);
      return GS_ALL_DONE;
    }
  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else
    {
      result_decl = TREE_OPERAND (ret_expr, 0);
      /* See through a return by reference.  */
      if (TREE_CODE (result_decl) == INDIRECT_REF)
        result_decl = TREE_OPERAND (result_decl, 0);
      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
                   || TREE_CODE (ret_expr) == INIT_EXPR)
                  && TREE_CODE (result_decl) == RESULT_DECL);
    }
  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in expand_return
     to handle this case that does not exist in expand_expr.  */
  if (!result_decl
      || aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    result = result_decl;
  else if (gimplify_ctxp->return_temp)
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_var (TREE_TYPE (result_decl), NULL);
      if (TREE_CODE (TREE_TYPE (result)) == COMPLEX_TYPE
          || TREE_CODE (TREE_TYPE (result)) == VECTOR_TYPE)
        DECL_GIMPLE_REG_P (result) = 1;
      /* ??? With complex control flow (usually involving abnormal edges),
         we can wind up warning about an uninitialized value for this.  Due
         to how this variable is constructed and initialized, this is never
         true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;
      gimplify_ctxp->return_temp = result;
    }
  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;
  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);
  ret = gimple_build_return (result);
  gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
  gimplify_seq_add_stmt (pre_p, ret);
  return GS_ALL_DONE;
}
/* Gimplify DECL, a variable-sized VAR_DECL: simplify its size expressions,
   allocate its storage with __builtin_alloca, and rewrite all uses of DECL
   through the resulting pointer via DECL_VALUE_EXPR.  Generated statements
   are appended to SEQ_P.  */
static void
gimplify_vla_decl (tree decl, gimple_seq *seq_p)
{
  /* This is a variable-sized decl.  Simplify its size and mark it
     for deferred expansion.  Note that mudflap depends on the format
     of the emitted code: see mx_register_decls().  */
  tree t, addr, ptr_type;
  gimplify_one_sizepos (&DECL_SIZE (decl), seq_p);
  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), seq_p);
  /* All occurrences of this decl in final gimplified code will be
     replaced by indirection.  Setting DECL_VALUE_EXPR does two
     things: First, it lets the rest of the gimplifier know what
     replacement to use.  Second, it lets the debug info know
     where to find the value.  */
  ptr_type = build_pointer_type (TREE_TYPE (decl));
  addr = create_tmp_var (ptr_type, get_name (decl));
  DECL_IGNORED_P (addr) = 0;
  t = build_fold_indirect_ref (addr);
  SET_DECL_VALUE_EXPR (decl, t);
  DECL_HAS_VALUE_EXPR_P (decl) = 1;
  /* addr = (ptr_type) __builtin_alloca (DECL_SIZE_UNIT (decl));  */
  t = built_in_decls[BUILT_IN_ALLOCA];
  t = build_call_expr (t, 1, DECL_SIZE_UNIT (decl));
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);
  gimplify_and_add (t, seq_p);
  /* Indicate that we need to restore the stack level when the
     enclosing BIND_EXPR is exited.  */
  gimplify_ctxp->save_stack = true;
}
/* Gimplifies a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.  *STMT_P is consumed (set to NULL_TREE);
   the generated statements go to SEQ_P.  */
static enum gimplify_status
gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);
  *stmt_p = NULL_TREE;
  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;
  if ((TREE_CODE (decl) == TYPE_DECL
       || TREE_CODE (decl) == VAR_DECL)
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    gimplify_type_sizes (TREE_TYPE (decl), seq_p);
  if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);
      /* A non-constant size, or a large size under -fstack-check=generic,
         forces the variable onto alloca-managed storage.  */
      if (TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
          || (!TREE_STATIC (decl)
              && flag_stack_check == GENERIC_STACK_CHECK
              && compare_tree_int (DECL_SIZE_UNIT (decl),
                                   STACK_CHECK_MAX_VAR_SIZE) > 0))
        gimplify_vla_decl (decl, seq_p);
      if (init && init != error_mark_node)
        {
          if (!TREE_STATIC (decl))
            {
              /* Turn the initializer into an explicit INIT_EXPR statement;
                 the tree form of DECL_INITIAL is no longer needed.  */
              DECL_INITIAL (decl) = NULL_TREE;
              init = build2 (INIT_EXPR, void_type_node, decl, init);
              gimplify_and_add (init, seq_p);
              ggc_free (init);
            }
          else
            /* We must still examine initializers for static variables
               as they may contain a label address.  */
            walk_tree (&init, force_labels_r, NULL, NULL);
        }
      /* Some front ends do not explicitly declare all anonymous
         artificial variables.  We compensate here by declaring the
         variables, though it would be better if the front ends would
         explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
          && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
        gimple_add_tmp_var (decl);
    }
  return GS_ALL_DONE;
}
/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  The exit label
   is communicated through gimplify_ctxp->exit_label, which is saved and
   restored around the body so nested loops each get their own.  */
static enum gimplify_status
gimplify_loop_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = create_artificial_label ();
  gimplify_seq_add_stmt (pre_p, gimple_build_label (start_label));
  gimplify_ctxp->exit_label = NULL_TREE;
  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);
  gimplify_seq_add_stmt (pre_p, gimple_build_goto (start_label));
  /* Only emit the exit label if some EXIT_EXPR in the body created it.  */
  if (gimplify_ctxp->exit_label)
    gimplify_seq_add_stmt (pre_p, gimple_build_label (gimplify_ctxp->exit_label));
  gimplify_ctxp->exit_label = saved_label;
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* Gimplifies a statement list onto a sequence.  These may be created either
   by an enlightened front-end, or by shortcut_cond_expr.  Each statement is
   gimplified onto PRE_P and removed from the list; if the list had a value,
   *EXPR_P becomes the temporary holding it.  */
static enum gimplify_status
gimplify_statement_list (tree *expr_p, gimple_seq *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);
  tree_stmt_iterator i = tsi_start (*expr_p);
  while (!tsi_end_p (i))
    {
      gimplify_stmt (tsi_stmt_ptr (i), pre_p);
      /* tsi_delink also advances the iterator to the next statement.  */
      tsi_delink (&i);
    }
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }
  return GS_ALL_DONE;
}
/* Compare two case labels. Because the front end should already have
made sure that case ranges do not overlap, it is enough to only compare
the CASE_LOW values of each case label. */
static int
compare_case_labels (const void *p1, const void *p2)
{
const_tree const case1 = *(const_tree const*)p1;
const_tree const case2 = *(const_tree const*)p2;
/* The 'default' case label always goes first. */
if (!CASE_LOW (case1))
return -1;
else if (!CASE_LOW (case2))
return 1;
else
return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
}
/* Sort the case labels in LABEL_VEC in place into ascending CASE_LOW
   order (default case first) using compare_case_labels.  */
void
sort_case_labels (VEC(tree,heap)* label_vec)
{
  qsort (VEC_address (tree, label_vec),
         VEC_length (tree, label_vec),
         sizeof (tree), compare_case_labels);
}
/* Gimplify a SWITCH_EXPR, and collect the vector of labels it can
   branch to.  Builds a GIMPLE_SWITCH on PRE_P followed by the gimplified
   switch body.  */
static enum gimplify_status
gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree switch_expr = *expr_p;
  gimple_seq switch_body_seq = NULL;
  enum gimplify_status ret;
  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val,
                       fb_rvalue);
  if (ret == GS_ERROR || ret == GS_UNHANDLED)
    return ret;
  if (SWITCH_BODY (switch_expr))
    {
      VEC (tree,heap) *labels;
      VEC (tree,heap) *saved_labels;
      tree default_case = NULL_TREE;
      size_t i, len;
      gimple gimple_switch;
      /* If someone can be bothered to fill in the labels, they can
         be bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));
      /* save old labels, get new ones from body, then restore the old
         labels.  Save all the things from the switch body to append after.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);
      gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);
      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;
      /* Filter the collected labels: drop empty ranges, and pull the
         default case out of the vector.  */
      i = 0;
      while (i < VEC_length (tree, labels))
        {
          tree elt = VEC_index (tree, labels, i);
          tree low = CASE_LOW (elt);
          bool remove_element = FALSE;
          if (low)
            {
              /* Discard empty ranges.  */
              tree high = CASE_HIGH (elt);
              if (high && tree_int_cst_lt (high, low))
                remove_element = TRUE;
            }
          else
            {
              /* The default case must be the last label in the list.  */
              gcc_assert (!default_case);
              default_case = elt;
              remove_element = TRUE;
            }
          if (remove_element)
            VEC_ordered_remove (tree, labels, i);
          else
            i++;
        }
      len = i;
      if (!VEC_empty (tree, labels))
        sort_case_labels (labels);
      if (!default_case)
        {
          tree type = TREE_TYPE (switch_expr);
          /* If the switch has no default label, add one, so that we jump
             around the switch body.  If the labels already cover the whole
             range of type, add the default label pointing to one of the
             existing labels.  */
          if (type == void_type_node)
            type = TREE_TYPE (SWITCH_COND (switch_expr));
          if (len
              && INTEGRAL_TYPE_P (type)
              && TYPE_MIN_VALUE (type)
              && TYPE_MAX_VALUE (type)
              && tree_int_cst_equal (CASE_LOW (VEC_index (tree, labels, 0)),
                                     TYPE_MIN_VALUE (type)))
            {
              tree low, high = CASE_HIGH (VEC_index (tree, labels, len - 1));
              if (!high)
                high = CASE_LOW (VEC_index (tree, labels, len - 1));
              if (tree_int_cst_equal (high, TYPE_MAX_VALUE (type)))
                {
                  /* Scan for gaps between consecutive sorted labels; the
                     double-word comparison checks LOW + 1 == HIGH across
                     the TREE_INT_CST_{LOW,HIGH} halves.  */
                  for (i = 1; i < len; i++)
                    {
                      high = CASE_LOW (VEC_index (tree, labels, i));
                      low = CASE_HIGH (VEC_index (tree, labels, i - 1));
                      if (!low)
                        low = CASE_LOW (VEC_index (tree, labels, i - 1));
                      if ((TREE_INT_CST_LOW (low) + 1
                           != TREE_INT_CST_LOW (high))
                          || (TREE_INT_CST_HIGH (low)
                              + (TREE_INT_CST_LOW (high) == 0)
                              != TREE_INT_CST_HIGH (high)))
                        break;
                    }
                  if (i == len)
                    default_case = build3 (CASE_LABEL_EXPR, void_type_node,
                                           NULL_TREE, NULL_TREE,
                                           CASE_LABEL (VEC_index (tree,
                                                                  labels, 0)));
                }
            }
          if (!default_case)
            {
              gimple new_default;
              default_case = build3 (CASE_LABEL_EXPR, void_type_node,
                                     NULL_TREE, NULL_TREE,
                                     create_artificial_label ());
              new_default = gimple_build_label (CASE_LABEL (default_case));
              gimplify_seq_add_stmt (&switch_body_seq, new_default);
            }
        }
      gimple_switch = gimple_build_switch_vec (SWITCH_COND (switch_expr),
                                               default_case, labels);
      gimplify_seq_add_stmt (pre_p, gimple_switch);
      gimplify_seq_add_seq (pre_p, switch_body_seq);
      VEC_free(tree, heap, labels);
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));
  return GS_ALL_DONE;
}
/* Gimplify a CASE_LABEL_EXPR: emit the label on PRE_P and record the
   label in the innermost gimplify context that is collecting case
   labels (the enclosing switch).  */
static enum gimplify_status
gimplify_case_label_expr (tree *expr_p, gimple_seq *pre_p)
{
  struct gimplify_ctx *ctxp;
  gimple gimple_label;
  /* Invalid OpenMP programs can play Duff's Device type games with
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification.  */
  for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
    if (ctxp->case_labels)
      break;
  gimple_label = gimple_build_label (CASE_LABEL (*expr_p));
  VEC_safe_push (tree, heap, ctxp->case_labels, *expr_p);
  gimplify_seq_add_stmt (pre_p, gimple_label);
  return GS_ALL_DONE;
}
/* Return a GOTO_EXPR to the LABEL_DECL pointed to by LABEL_P, creating
   the label first if *LABEL_P is still NULL_TREE.  A NULL LABEL_P means
   there is nowhere to jump; return NULL_TREE so the caller falls
   through.  */
tree
build_and_jump (tree *label_p)
{
  if (!label_p)
    return NULL_TREE;

  /* Lazily create the destination label on first use.  */
  if (*label_p == NULL_TREE)
    *label_p = create_artificial_label ();

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}
/* Gimplify an EXIT_EXPR by converting it to a GOTO_EXPR wrapped in a
   COND_EXPR.  The jump target is the enclosing loop's exit label, which
   is created on demand in gimplify_ctxp->exit_label so that
   gimplify_loop_expr knows to emit it.  */
static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree cond = TREE_OPERAND (*expr_p, 0);
  tree jump = build_and_jump (&gimplify_ctxp->exit_label);

  *expr_p = build3 (COND_EXPR, void_type_node, cond, jump, NULL_TREE);
  return GS_OK;
}
/* walk_tree callback that marks every LABEL_DECL under *TP as forced.
   Used on DECL_INITIAL of static variables, whose initializers may take
   the address of a label.  Types cannot contain labels, so subtrees of
   types are skipped.  */
tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;

  if (TYPE_P (t))
    *walk_subtrees = 0;
  if (TREE_CODE (t) == LABEL_DECL)
    FORCED_LABEL (t) = 1;
  return NULL_TREE;
}
/* *EXPR_P is a COMPONENT_REF being used as an rvalue.  If its type is
   different from its canonical type, wrap the whole thing inside a
   NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
   type.
   The canonical type of a COMPONENT_REF is the type of the field being
   referenced--unless the field is a bit-field which can be read directly
   in a smaller mode, in which case the canonical type is the
   sign-appropriate type corresponding to that mode.  */
static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;
  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);
  /* get_unwidened returns the narrowest mode the bit-field can be
     read in; for non-integral fields the field's declared type is
     canonical.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));
  /* One could argue that all the stuff below is not necessary for
     the non-bitfield case and declare it a FE error if type
     adjustment would be needed.  */
  if (TREE_TYPE (expr) != type)
    {
#ifdef ENABLE_TYPES_CHECKING
      tree old_type = TREE_TYPE (expr);
#endif
      int type_quals;
      /* We need to preserve qualifiers and propagate them from
         operand 0.  */
      type_quals = TYPE_QUALS (type)
        | TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0)));
      if (TYPE_QUALS (type) != type_quals)
        type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals);
      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;
#ifdef ENABLE_TYPES_CHECKING
      /* It is now a FE error, if the conversion from the canonical
         type to the original expression type is not useless.  */
      gcc_assert (useless_type_conversion_p (old_type, type));
#endif
    }
}
/* If a NOP conversion is changing a pointer to array of foo to a pointer
   to foo, embed that change in the ADDR_EXPR by converting
      T array[U];
      (T *)&array
   ==>
      &array[L]
   where L is the lower bound.  For simplicity, only do this for constant
   lower bound.
   The constraint is that the type of &array[L] is trivially convertible
   to T *.  */
static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree datype, ddatype, pddatype;
  /* We simplify only conversions from an ADDR_EXPR to a pointer type.  */
  if (!POINTER_TYPE_P (TREE_TYPE (expr))
      || TREE_CODE (addr_expr) != ADDR_EXPR)
    return;
  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (TREE_TYPE (addr_expr));
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;
  /* The pointer to element type shall be trivially convertible to
     the expression pointer type.  Note: compare the conversion's result
     type against the pointer-to-element type; the previous code compared
     the pointer type against the element type itself, which made this
     check fail unconditionally and the whole transform dead.  */
  ddatype = TREE_TYPE (datype);
  pddatype = build_pointer_type (ddatype);
  if (!useless_type_conversion_p (TREE_TYPE (expr), pddatype))
    return;
  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (ddatype)
      || TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;
  /* All checks succeeded.  Build a new node to merge the cast.  */
  *expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0),
                    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
                    NULL_TREE, NULL_TREE);
  *expr_p = build1 (ADDR_EXPR, pddatype, *expr_p);
}
/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other conversions
   underneath as appropriate.  Always returns GS_OK; *EXPR_P may be
   replaced by its operand, an ADDR_EXPR, or a VIEW_CONVERT_EXPR.  */
static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  tree tem;
  gcc_assert (CONVERT_EXPR_P (*expr_p));
  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));
  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);
  /* Attempt to avoid NOP_EXPR by producing reference to a subtype.
     For example this fold (subclass *)&A into &A->subclass avoiding
     a need for statement.  */
  if (CONVERT_EXPR_P (*expr_p)
      && POINTER_TYPE_P (TREE_TYPE (*expr_p))
      && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (*expr_p, 0)))
      && (tem = maybe_fold_offset_to_address
          (TREE_OPERAND (*expr_p, 0),
           integer_zero_node, TREE_TYPE (*expr_p))) != NULL_TREE)
    *expr_p = tem;
  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (CONVERT_EXPR_P (*expr_p))
    {
      tree sub = TREE_OPERAND (*expr_p, 0);
      /* If a NOP conversion is changing the type of a COMPONENT_REF
         expression, then canonicalize its type now in order to expose more
         redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
        canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));
      /* If a NOP conversion is changing a pointer to array of foo
         to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
        canonicalize_addr_expr (expr_p);
    }
  /* If we have a conversion to a non-register type force the
     use of a VIEW_CONVERT_EXPR instead.  */
  if (CONVERT_EXPR_P (*expr_p) && !is_gimple_reg_type (TREE_TYPE (*expr_p)))
    *expr_p = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*expr_p),
                           TREE_OPERAND (*expr_p, 0));
  return GS_OK;
}
/* Gimplify a VAR_DECL or PARM_DECL.  Returns GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  Returns GS_ERROR
   for a leaked undeclared local; GS_ALL_DONE otherwise.  */
static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;
  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (errorcount || sorrycount);
      return GS_ERROR;
    }
  /* When within an OpenMP context, notice uses of variables.  */
  if (gimplify_omp_ctxp && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;
  /* If the decl is an alias for another expression, substitute it now.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      /* Unshare so later gimplification may freely modify the copy.  */
      *expr_p = unshare_expr (DECL_VALUE_EXPR (decl));
      return GS_OK;
    }
  return GS_ALL_DONE;
}
/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node *EXPR_P.
      compound_lval
              : min_lval '[' val ']'
              | min_lval '.' ID
              | compound_lval '[' val ']'
              | compound_lval '.' ID
   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.  Also, this way we don't run into problems with union
   aliasing; gcc requires that for accesses through a union to alias, the
   union reference must be explicit, which was not always the case when we
   were splitting up array and member refs.
   PRE_P points to the sequence where side effects that must happen before
   *EXPR_P should be stored.
   POST_P points to the sequence where side effects that must happen after
   *EXPR_P should be stored.  */
static enum gimplify_status
gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
                        fallback_t fallback)
{
  tree *p;
  VEC(tree,heap) *stack;
  enum gimplify_status ret = GS_OK, tret;
  int i;
  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  stack = VEC_alloc (tree, heap, 10);
  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
        *p = fold_indirect_ref (*p);
      if (handled_component_p (*p))
        ;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
         additional COMPONENT_REFs.  */
      else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
               && gimplify_var_or_parm_decl (p) == GS_OK)
        goto restart;
      else
        break;
      VEC_safe_push (tree, heap, stack, *p);
    }
  gcc_assert (VEC_length (tree, stack));
  /* Now STACK is a stack of pointers to all the refs we've walked through
     and P points to the innermost expression.
     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.
     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = VEC_length (tree, stack) - 1; i >= 0; i--)
    {
      tree t = VEC_index (tree, stack, i);
      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
        {
          /* Gimplify the low bound and element type size and put them into
             the ARRAY_REF.  If these values are set, they have already been
             gimplified.  */
          if (TREE_OPERAND (t, 2) == NULL_TREE)
            {
              tree low = unshare_expr (array_ref_low_bound (t));
              if (!is_gimple_min_invariant (low))
                {
                  TREE_OPERAND (t, 2) = low;
                  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
                                        post_p, is_gimple_formal_tmp_reg,
                                        fb_rvalue);
                  ret = MIN (ret, tret);
                }
            }
          else
            {
              tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
                                    is_gimple_reg, fb_rvalue);
              ret = MIN (ret, tret);
            }
          if (TREE_OPERAND (t, 3) == NULL_TREE)
            {
              tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
              tree elmt_size = unshare_expr (array_ref_element_size (t));
              tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));
              /* Divide the element size by the alignment of the element
                 type (above).  */
              elmt_size = size_binop (EXACT_DIV_EXPR, elmt_size, factor);
              if (!is_gimple_min_invariant (elmt_size))
                {
                  TREE_OPERAND (t, 3) = elmt_size;
                  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p,
                                        post_p, is_gimple_formal_tmp_reg,
                                        fb_rvalue);
                  ret = MIN (ret, tret);
                }
            }
          else
            {
              tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
                                    is_gimple_reg, fb_rvalue);
              ret = MIN (ret, tret);
            }
        }
      else if (TREE_CODE (t) == COMPONENT_REF)
        {
          /* Set the field offset into T and gimplify it.  */
          if (TREE_OPERAND (t, 2) == NULL_TREE)
            {
              tree offset = unshare_expr (component_ref_field_offset (t));
              tree field = TREE_OPERAND (t, 1);
              tree factor
                = size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);
              /* Divide the offset by its alignment.  */
              offset = size_binop (EXACT_DIV_EXPR, offset, factor);
              if (!is_gimple_min_invariant (offset))
                {
                  TREE_OPERAND (t, 2) = offset;
                  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
                                        post_p, is_gimple_formal_tmp_reg,
                                        fb_rvalue);
                  ret = MIN (ret, tret);
                }
            }
          else
            {
              tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
                                    is_gimple_reg, fb_rvalue);
              ret = MIN (ret, tret);
            }
        }
    }
  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
                        fallback | fb_lvalue);
  ret = MIN (ret, tret);
  /* And finally, the indices and operands to BIT_FIELD_REF.  During this
     loop we also remove any useless conversions.  */
  for (; VEC_length (tree, stack) > 0; )
    {
      tree t = VEC_pop (tree, stack);
      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
        {
          /* Gimplify the dimension.
             Temporary fix for gcc.c-torture/execute/20040313-1.c.
             Gimplify non-constant array indices into a temporary
             variable.
             FIXME - The real fix is to gimplify post-modify
             expressions into a minimal gimple lvalue.  However, that
             exposes bugs in alias analysis.  The alias analyzer does
             not handle &PTR->FIELD very well.  Will fix after the
             branch is merged into mainline (dnovillo 2004-05-03).  */
          if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
            {
              tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
                                    is_gimple_formal_tmp_reg, fb_rvalue);
              ret = MIN (ret, tret);
            }
        }
      else if (TREE_CODE (t) == BIT_FIELD_REF)
        {
          tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
                                is_gimple_val, fb_rvalue);
          ret = MIN (ret, tret);
          tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
                                is_gimple_val, fb_rvalue);
          ret = MIN (ret, tret);
        }
      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));
      /* The innermost expression P may have originally had
         TREE_SIDE_EFFECTS set which would have caused all the outer
         expressions in *EXPR_P leading to P to also have had
         TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }
  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
      ret = MIN (ret, GS_OK);
    }
  VEC_free (tree, heap, stack);
  return ret;
}
/* Gimplify the self modifying expression pointed to by EXPR_P
   (++, --, +=, -=).
   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.
   POST_P points to the list where side effects that must happen after
   *EXPR_P should be stored.
   WANT_VALUE is nonzero iff we want to use the value of this expression
   in another expression.  */
static enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
                        bool want_value)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1;
  gimple_seq post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;
  code = TREE_CODE (*expr_p);
  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
              || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);
  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;
  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;
  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;
  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;
  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);
  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  */
  if (postfix)
    {
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
        return ret;
    }
  /* For POINTERs increment, use POINTER_PLUS_EXPR.  */
  if (POINTER_TYPE_P (TREE_TYPE (lhs)))
    {
      rhs = fold_convert (sizetype, rhs);
      /* POINTER_PLUS_EXPR has no subtraction form; negate the offset.  */
      if (arith_code == MINUS_EXPR)
        rhs = fold_build1 (NEGATE_EXPR, TREE_TYPE (rhs), rhs);
      arith_code = POINTER_PLUS_EXPR;
    }
  t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs);
  if (postfix)
    {
      /* The update goes on the post queue; the pre-modification value
         (LHS) is the result of the whole expression.  */
      gimplify_assign (lvalue, t1, orig_post_p);
      gimplify_seq_add_seq (orig_post_p, post);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);
      return GS_OK;
    }
}
/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR so the
   runtime size travels with the value.  No-op for constant-sized types,
   error types, and expressions already wrapped.  */
static void
maybe_with_size_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  tree size;
  /* If we've already wrapped this or the type is error_mark_node, we can't do
     anything.  */
  if (TREE_CODE (expr) == WITH_SIZE_EXPR
      || type == error_mark_node)
    return;
  /* If the size isn't known or is a constant, we have nothing to do.  */
  size = TYPE_SIZE_UNIT (type);
  if (!size || TREE_CODE (size) == INTEGER_CST)
    return;
  /* Otherwise, make a WITH_SIZE_EXPR.  Substituting PLACEHOLDER_EXPRs lets
     self-referential sizes (e.g. Ada arrays) resolve against EXPR itself.  */
  size = unshare_expr (size);
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}
/* Helper for gimplify_call_expr.  Gimplify a single argument *ARG_P
   Store any side-effects in PRE_P.  CALL_LOCATION is the location of
   the CALL_EXPR.  */

static enum gimplify_status
gimplify_arg (tree *arg_p, gimple_seq *pre_p, location_t call_location)
{
  bool (*predicate) (tree);
  fallback_t fallback;

  /* Register-sized values are pulled out into temporaries to keep the
     optimizers happy; larger aggregates are allowed to stay as lvalues
     so we avoid copying them out of even larger aggregates only to copy
     the temporaries back into the argument list.  */
  if (is_gimple_reg_type (TREE_TYPE (*arg_p)))
    {
      predicate = is_gimple_val;
      fallback = fb_rvalue;
    }
  else
    {
      predicate = is_gimple_lvalue;
      fallback = fb_either;
    }

  /* A variable sized type must carry its size along.  */
  maybe_with_size_expr (arg_p);

  /* Arguments inherit the location of the call itself.  */
  protected_set_expr_location (*arg_p, call_location);

  /* There is a sequence point before a function call, so side effects
     in the argument list must occur before the actual call: pass a NULL
     post queue so gimplify_expr uses an internal one which it appends
     to the end of PRE_P.  */
  return gimplify_expr (arg_p, pre_p, NULL, predicate, fallback);
}
/* Gimplify the CALL_EXPR node *EXPR_P into the GIMPLE sequence PRE_P.
   WANT_VALUE is true if the result of the call is desired.  Returns the
   gimplify status; on success, *EXPR_P is either the (gimplified)
   CALL_EXPR, a folded replacement expression, or NULL_TREE when the
   value is unwanted and a GIMPLE_CALL has been emitted to PRE_P.  */

static enum gimplify_status
gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree fndecl, parms, p;
  enum gimplify_status ret;
  int i, nargs;
  gimple call;
  bool builtin_va_start_p = FALSE;

  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);

  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);

  /* This may be a call to a builtin function.

     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.

     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  fndecl = get_callee_fndecl (*expr_p);
  if (fndecl && DECL_BUILT_IN (fndecl))
    {
      tree new_tree = fold_call_expr (*expr_p, !want_value);

      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}

      /* va_start needs special handling below: its second argument must
	 stay a plain PARM_DECL, so remember we saw it.  */
      if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_VA_START)
        {
	  builtin_va_start_p = TRUE;
	  if (call_expr_nargs (*expr_p) < 2)
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }

	  if (fold_builtin_next_arg (*expr_p, true))
	    {
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }
	}
    }

  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);

  nargs = call_expr_nargs (*expr_p);

  /* Get argument types for verification.  */
  fndecl = get_callee_fndecl (*expr_p);
  parms = NULL_TREE;
  if (fndecl)
    parms = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  else if (POINTER_TYPE_P (TREE_TYPE (CALL_EXPR_FN (*expr_p))))
    parms = TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (*expr_p))));

  if (fndecl && DECL_ARGUMENTS (fndecl))
    p = DECL_ARGUMENTS (fndecl);
  else if (parms)
    p = parms;
  else
    p = NULL_TREE;
  /* Walk P in lock-step with the actual arguments; if P runs out first,
     the call passes more arguments than the declaration names.  */
  for (i = 0; i < nargs && p; i++, p = TREE_CHAIN (p))
    ;

  /* If the last argument is __builtin_va_arg_pack () and it is not
     passed as a named argument, decrease the number of CALL_EXPR
     arguments and set instead the CALL_EXPR_VA_ARG_PACK flag.  */
  if (!p
      && i < nargs
      && TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR)
    {
      tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1);
      tree last_arg_fndecl = get_callee_fndecl (last_arg);

      if (last_arg_fndecl
	  && TREE_CODE (last_arg_fndecl) == FUNCTION_DECL
	  && DECL_BUILT_IN_CLASS (last_arg_fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (last_arg_fndecl) == BUILT_IN_VA_ARG_PACK)
	{
	  tree call = *expr_p;

	  /* Rebuild the CALL_EXPR without the trailing va_arg_pack
	     argument; the flag set below tells later passes to expand
	     the pack at this call site.  */
	  --nargs;
	  *expr_p = build_call_array (TREE_TYPE (call), CALL_EXPR_FN (call),
				      nargs, CALL_EXPR_ARGP (call));

	  /* Copy all CALL_EXPR flags, location and block, except
	     CALL_EXPR_VA_ARG_PACK flag.  */
	  CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call);
	  CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call);
	  CALL_EXPR_RETURN_SLOT_OPT (*expr_p)
	    = CALL_EXPR_RETURN_SLOT_OPT (call);
	  CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call);
	  CALL_CANNOT_INLINE_P (*expr_p) = CALL_CANNOT_INLINE_P (call);
	  SET_EXPR_LOCUS (*expr_p, EXPR_LOCUS (call));
	  TREE_BLOCK (*expr_p) = TREE_BLOCK (call);

	  /* Set CALL_EXPR_VA_ARG_PACK.  */
	  CALL_EXPR_VA_ARG_PACK (*expr_p) = 1;
	}
    }

  /* Finally, gimplify the function arguments.  */
  if (nargs > 0)
    {
      /* Iterate in the order arguments are pushed for the target, so
	 side effects are emitted in evaluation order.  */
      for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0);
           PUSH_ARGS_REVERSED ? i >= 0 : i < nargs;
           PUSH_ARGS_REVERSED ? i-- : i++)
        {
          enum gimplify_status t;

          /* Avoid gimplifying the second argument to va_start, which needs to
             be the plain PARM_DECL.  */
          if ((i != 1) || !builtin_va_start_p)
            {
              t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
				EXPR_LOCATION (*expr_p));

              if (t == GS_ERROR)
                ret = GS_ERROR;
            }
        }
    }

  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      tree new_tree = fold_call_expr (*expr_p, !want_value);

      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}
    }
  else
    {
      *expr_p = error_mark_node;
      return GS_ERROR;
    }

  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR)
    {
      int flags = call_expr_flags (*expr_p);
      if (flags & (ECF_CONST | ECF_PURE)
	  /* An infinite loop is considered a side effect.  */
	  && !(flags & (ECF_LOOPING_CONST_OR_PURE)))
	TREE_SIDE_EFFECTS (*expr_p) = 0;
    }

  /* If the value is not needed by the caller, emit a new GIMPLE_CALL
     and clear *EXPR_P.  Otherwise, leave *EXPR_P in its gimplified
     form and delegate the creation of a GIMPLE_CALL to
     gimplify_modify_expr.  This is always possible because when
     WANT_VALUE is true, the caller wants the result of this call into
     a temporary, which means that we will emit an INIT_EXPR in
     internal_get_tmp_var which will then be handled by
     gimplify_modify_expr.  */
  if (!want_value)
    {
      /* The CALL_EXPR in *EXPR_P is already in GIMPLE form, so all we
	 have to do is replicate it as a GIMPLE_CALL tuple.  */
      call = gimple_build_call_from_tree (*expr_p);
      gimplify_seq_add_stmt (pre_p, call);
      *expr_p = NULL_TREE;
    }

  return ret;
}
/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
   rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.

   TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
   condition is true or false, respectively.  If null, we should generate
   our own to skip over the evaluation of this specific expression.

   This function is the tree equivalent of do_jump.

   shortcut_cond_r should only be called by shortcut_cond_expr.  */

static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;

  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.  */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      /* Turn if (a && b) into

	 if (a); else goto no;
	 if (b) goto yes; else goto no;
	 (no:) */

      if (false_label_p == NULL)
	false_label_p = &local_label;

      /* Recurse on each operand; only the second one needs to know the
	 true destination, since falling through A means B decides.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p);
      append_to_statement_list (t, &expr);

      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
			   false_label_p);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      /* Turn if (a || b) into

	 if (a) goto yes;
	 if (b) goto yes; else goto no;
	 (yes:) */

      if (true_label_p == NULL)
	true_label_p = &local_label;

      /* Symmetric to the && case: falling through A means B decides the
	 false destination.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL);
      append_to_statement_list (t, &expr);

      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
			   false_label_p);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR
	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 1)))
	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 2))))
    {
      /* As long as we're messing with gotos, turn if (a ? b : c) into
	 if (a)
	   if (b) goto yes; else goto no;
	 else
	   if (c) goto yes; else goto no;

	 Don't do this if one of the arms has void type, which can happen
	 in C++ when the arm is throw.  */
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
		     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
				      false_label_p),
		     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
				      false_label_p));
    }
  else
    {
      /* Leaf predicate: emit a plain two-way branch.  build_and_jump
	 creates the label decl on demand when the pointee is NULL.  */
      expr = build3 (COND_EXPR, void_type_node, pred,
		     build_and_jump (true_label_p),
		     build_and_jump (false_label_p));
    }

  /* If this node created its own skip label, emit it at the end so the
     short-circuited jump has somewhere to land.  */
  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}
/* Given a conditional expression EXPR with short-circuit boolean
   predicates using TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR, break the
   predicate apart into the equivalent sequence of conditionals.  */

static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);

  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn (a && b) into if (a) if (b).  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
	{
	  /* Peel one && operand per iteration, nesting the rest inside
	     a recursively-shortcut 'then' arm.  */
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  then_ = shortcut_cond_expr (expr);
	  then_se = then_ && TREE_SIDE_EFFECTS (then_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
	}
    }

  if (!then_se)
    {
      /* If there is no 'then', turn
	   if (a || b); else d
	 into
	   if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
	{
	  /* Symmetric peeling for || with an empty 'then' arm.  */
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  else_ = shortcut_cond_expr (expr);
	  else_se = else_ && TREE_SIDE_EFFECTS (else_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
	}
    }

  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;

  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */

  true_label = false_label = end_label = NULL_TREE;

  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */

  if (then_
      && TREE_CODE (then_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL)
    {
      true_label = GOTO_DESTINATION (then_);
      then_ = NULL;
      then_se = false;
    }

  if (else_
      && TREE_CODE (else_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL)
    {
      false_label = GOTO_DESTINATION (else_);
      else_ = NULL;
      else_se = false;
    }

  /* If we aren't hijacking a label for the 'then' branch, it falls through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;

  /* The 'else' branch also needs a label if it contains interesting code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;

  /* If there was nothing else in our arms, just forward the label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p);

  /* If our last subexpression already has a terminal label, reuse it.  */
  if (else_se)
    expr = expr_last (else_);
  else if (then_se)
    expr = expr_last (then_);
  else
    expr = NULL;
  if (expr && TREE_CODE (expr) == LABEL_EXPR)
    end_label = LABEL_EXPR_LABEL (expr);

  /* If we don't care about jumping to the 'else' branch, jump to the end
     if the condition is false.  */
  if (!false_label_p)
    false_label_p = &end_label;

  /* We only want to emit these labels if we aren't hijacking them.  */
  emit_end = (end_label == NULL_TREE);
  emit_false = (false_label == NULL_TREE);

  /* We only emit the jump over the else clause if we have to--if the
     then clause may fall through.  Otherwise we can wind up with a
     useless jump and a useless label at the end of gimplified code,
     which will cause us to think that this conditional as a whole
     falls through even if it doesn't.  If we then inline a function
     which ends with such a condition, that can cause us to issue an
     inappropriate warning about control reaching the end of a
     non-void function.  */
  jump_over_else = block_may_fallthru (then_);

  /* Lower the predicate; this may allocate true/false/end labels
     through the pointers passed above.  */
  pred = shortcut_cond_r (pred, true_label_p, false_label_p);

  expr = NULL;
  append_to_statement_list (pred, &expr);

  append_to_statement_list (then_, &expr);
  if (else_se)
    {
      if (jump_over_else)
	{
	  t = build_and_jump (&end_label);
	  append_to_statement_list (t, &expr);
	}
      if (emit_false)
	{
	  t = build1 (LABEL_EXPR, void_type_node, false_label);
	  append_to_statement_list (t, &expr);
	}
      append_to_statement_list (else_, &expr);
    }
  if (emit_end && end_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, end_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}
/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE.
   Mutates EXPR (and, for truth expressions, its operands) in place and
   returns the possibly-converted result.  */

tree
gimple_boolify (tree expr)
{
  tree type = TREE_TYPE (expr);

  /* Special-case "__builtin_expect ((long) (x), y) != 0": recurse into
     x as well if x is truth_value_p, so the boolean conversion reaches
     through the builtin's long-typed wrapper.  */
  if (TREE_CODE (expr) == NE_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == CALL_EXPR
      && integer_zerop (TREE_OPERAND (expr, 1)))
    {
      tree call = TREE_OPERAND (expr, 0);
      tree fn = get_callee_fndecl (call);

      /* For __builtin_expect ((long) (x), y) recurse into x as well
	 if x is truth_value_p.  */
      if (fn
	  && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fn) == BUILT_IN_EXPECT
	  && call_expr_nargs (call) == 2)
	{
	  tree arg = CALL_EXPR_ARG (call, 0);
	  if (arg)
	    {
	      /* Strip the (long) cast the front end wraps around the
		 first argument.  */
	      if (TREE_CODE (arg) == NOP_EXPR
		  && TREE_TYPE (arg) == TREE_TYPE (call))
		arg = TREE_OPERAND (arg, 0);
	      if (truth_value_p (TREE_CODE (arg)))
		{
		  arg = gimple_boolify (arg);
		  CALL_EXPR_ARG (call, 0)
		    = fold_convert (TREE_TYPE (call), arg);
		}
	    }
	}
    }

  /* Already boolean: nothing more to do.  */
  if (TREE_CODE (type) == BOOLEAN_TYPE)
    return expr;

  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Also boolify the arguments of truth exprs.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */

    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
      /* FALLTHRU */

    case EQ_EXPR: case NE_EXPR:
    case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR:
      /* These expressions always produce boolean results.  */
      TREE_TYPE (expr) = boolean_type_node;
      return expr;

    default:
      /* Other expressions that get here must have boolean values, but
	 might need to be converted to the appropriate mode.  */
      return fold_convert (boolean_type_node, expr);
    }
}
/* Given a conditional expression *EXPR_P without side effects, gimplify
   its operands.  New statements are inserted to PRE_P.  Returns the
   worst gimplify status of the condition and the two arms.  */

static enum gimplify_status
gimplify_pure_cond_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p, cond;
  enum gimplify_status ret, tret;
  enum tree_code code;

  /* Make sure the condition has BOOLEAN_TYPE.  */
  cond = gimple_boolify (COND_EXPR_COND (expr));

  /* We need to handle && and || specially, as their gimplification
     creates pure cond_expr, thus leading to an infinite cycle otherwise.
     Since the arms here are side-effect free, the short-circuit forms
     may safely be demoted to their eager counterparts.  */
  code = TREE_CODE (cond);
  if (code == TRUTH_ANDIF_EXPR)
    TREE_SET_CODE (cond, TRUTH_AND_EXPR);
  else if (code == TRUTH_ORIF_EXPR)
    TREE_SET_CODE (cond, TRUTH_OR_EXPR);

  /* Gimplify the condition, then each arm, accumulating the worst
     status across the three.  */
  ret = gimplify_expr (&cond, pre_p, NULL, is_gimple_condexpr, fb_rvalue);
  COND_EXPR_COND (*expr_p) = cond;

  tret = gimplify_expr (&COND_EXPR_THEN (expr), pre_p, NULL,
			is_gimple_val, fb_rvalue);
  ret = MIN (ret, tret);
  tret = gimplify_expr (&COND_EXPR_ELSE (expr), pre_p, NULL,
			is_gimple_val, fb_rvalue);

  return MIN (ret, tret);
}
/* Returns true if evaluating EXPR could trap.
   EXPR is GENERIC, while tree_could_trap_p can be called
   only on GIMPLE.  */

static bool
generic_expr_could_trap_p (tree expr)
{
  unsigned idx, len;

  /* A null expression or a simple GIMPLE value can never trap.  */
  if (expr == NULL_TREE || is_gimple_val (expr))
    return false;

  /* Anything that is not an expression, or that traps directly, is
     treated conservatively as trapping.  */
  if (!EXPR_P (expr) || tree_could_trap_p (expr))
    return true;

  /* Otherwise the expression traps iff any of its operands does.  */
  len = TREE_OPERAND_LENGTH (expr);
  for (idx = 0; idx < len; idx++)
    if (generic_expr_could_trap_p (TREE_OPERAND (expr, idx)))
      return true;

  return false;
}
/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
   into

   if (p)			if (p)
     t1 = a;			  a;
   else		or		else
     t1 = b;			  b;
   t1;

   The second form is used when *EXPR_P is of type void.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  FALLBACK tells whether the caller can
   accept an rvalue, needs an lvalue, etc.  */

static enum gimplify_status
gimplify_cond_expr (tree *expr_p, gimple_seq *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree tmp, type, arm1, arm2;
  enum gimplify_status ret;
  tree label_true, label_false, label_cont;
  bool have_then_clause_p, have_else_clause_p;
  gimple gimple_cond;
  enum tree_code pred_code;
  gimple_seq seq = NULL;

  type = TREE_TYPE (expr);

  /* If this COND_EXPR has a value, copy the values into a temporary within
     the arms.  */
  if (! VOID_TYPE_P (type))
    {
      tree result;

      /* If an rvalue is ok or we do not require an lvalue, avoid creating
	 an addressable temporary.  */
      if (((fallback & fb_rvalue)
	   || !(fallback & fb_lvalue))
	  && !TREE_ADDRESSABLE (type))
	{
	  if (gimplify_ctxp->allow_rhs_cond_expr
	      /* If either branch has side effects or could trap, it can't be
		 evaluated unconditionally.  */
	      && !TREE_SIDE_EFFECTS (TREE_OPERAND (*expr_p, 1))
	      && !generic_expr_could_trap_p (TREE_OPERAND (*expr_p, 1))
	      && !TREE_SIDE_EFFECTS (TREE_OPERAND (*expr_p, 2))
	      && !generic_expr_could_trap_p (TREE_OPERAND (*expr_p, 2)))
	    return gimplify_pure_cond_expr (expr_p, pre_p);

	  result = tmp = create_tmp_var (TREE_TYPE (expr), "iftmp");
	  ret = GS_ALL_DONE;
	}
      else
	{
	  /* An lvalue result is needed (or the type is addressable):
	     work with the arms' addresses instead, and hand back a
	     dereference of the pointer temporary.  */
	  tree type = build_pointer_type (TREE_TYPE (expr));

	  if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
	    TREE_OPERAND (expr, 1) =
	      build_fold_addr_expr (TREE_OPERAND (expr, 1));

	  if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
	    TREE_OPERAND (expr, 2) =
	      build_fold_addr_expr (TREE_OPERAND (expr, 2));

	  tmp = create_tmp_var (type, "iftmp");

	  expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (expr, 0),
			 TREE_OPERAND (expr, 1), TREE_OPERAND (expr, 2));

	  result = build_fold_indirect_ref (tmp);
	}

      /* Build the then clause, 't1 = a;'.  But don't build an assignment
	 if this branch is void; in C++ it can be, if it's a throw.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
	TREE_OPERAND (expr, 1)
	  = build2 (MODIFY_EXPR, TREE_TYPE (tmp), tmp, TREE_OPERAND (expr, 1));

      /* Build the else clause, 't1 = b;'.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
	TREE_OPERAND (expr, 2)
	  = build2 (MODIFY_EXPR, TREE_TYPE (tmp), tmp, TREE_OPERAND (expr, 2));

      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_stmt (&expr, pre_p);

      *expr_p = result;
      return GS_ALL_DONE;
    }

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
	{
	  *expr_p = expr;

	  /* We can't rely on gimplify_expr to re-gimplify the expanded
	     form properly, as cleanups might cause the target labels to be
	     wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to
	     set up a conditional context.  */
	  gimple_push_condition ();
	  gimplify_stmt (expr_p, &seq);
	  gimple_pop_condition (pre_p);
	  gimple_seq_add_seq (pre_p, seq);

	  return GS_ALL_DONE;
	}
    }

  /* Now do the normal gimplification.  */

  /* Gimplify condition.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr,
		       fb_rvalue);
  if (ret == GS_ERROR)
    return GS_ERROR;
  gcc_assert (TREE_OPERAND (expr, 0) != NULL_TREE);

  gimple_push_condition ();

  have_then_clause_p = have_else_clause_p = false;
  /* If the 'then' arm is already a goto to a local label, reuse that
     label as the branch target instead of creating an artificial one
     (but only when that would not break -O0 location information).  */
  if (TREE_OPERAND (expr, 1) != NULL
      && TREE_CODE (TREE_OPERAND (expr, 1)) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 1))) == LABEL_DECL
      && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 1)))
	  == current_function_decl)
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 1))
	  || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 1))))
    {
      label_true = GOTO_DESTINATION (TREE_OPERAND (expr, 1));
      have_then_clause_p = true;
    }
  else
    label_true = create_artificial_label ();
  /* Likewise for the 'else' arm.  */
  if (TREE_OPERAND (expr, 2) != NULL
      && TREE_CODE (TREE_OPERAND (expr, 2)) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 2))) == LABEL_DECL
      && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 2)))
	  == current_function_decl)
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 2))
	  || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 2))))
    {
      label_false = GOTO_DESTINATION (TREE_OPERAND (expr, 2));
      have_else_clause_p = true;
    }
  else
    label_false = create_artificial_label ();

  /* Split the boolean condition into a comparison code and two
     operands, then emit the two-way GIMPLE_COND branch.  */
  gimple_cond_get_ops_from_tree (COND_EXPR_COND (expr), &pred_code, &arm1,
				 &arm2);

  gimple_cond = gimple_build_cond (pred_code, arm1, arm2, label_true,
                                   label_false);

  gimplify_seq_add_stmt (&seq, gimple_cond);
  label_cont = NULL_TREE;
  if (!have_then_clause_p)
    {
      /* For if (...) {} else { code; } put label_true after
	 the else block.  */
      if (TREE_OPERAND (expr, 1) == NULL_TREE
	  && !have_else_clause_p
	  && TREE_OPERAND (expr, 2) != NULL_TREE)
	label_cont = label_true;
      else
	{
	  gimplify_seq_add_stmt (&seq, gimple_build_label (label_true));
	  have_then_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 1), &seq);
	  /* For if (...) { code; } else {} or
	     if (...) { code; } else goto label; or
	     if (...) { code; return; } else { ... }
	     label_cont isn't needed.  */
	  if (!have_else_clause_p
	      && TREE_OPERAND (expr, 2) != NULL_TREE
	      && gimple_seq_may_fallthru (seq))
	    {
	      gimple g;
	      label_cont = create_artificial_label ();

	      g = gimple_build_goto (label_cont);

	      /* GIMPLE_COND's are very low level; they have embedded
		 gotos.  This particular embedded goto should not be marked
		 with the location of the original COND_EXPR, as it would
		 correspond to the COND_EXPR's condition, not the ELSE or the
		 THEN arms.  To avoid marking it with the wrong location, flag
		 it as "no location".  */
	      gimple_set_do_not_emit_location (g);

	      gimplify_seq_add_stmt (&seq, g);
	    }
	}
    }
  if (!have_else_clause_p)
    {
      gimplify_seq_add_stmt (&seq, gimple_build_label (label_false));
      have_else_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 2), &seq);
    }
  if (label_cont)
    gimplify_seq_add_stmt (&seq, gimple_build_label (label_cont));

  gimple_pop_condition (pre_p);
  gimple_seq_add_seq (pre_p, seq);

  if (ret == GS_ERROR)
    ; /* Do nothing.  */
  else if (have_then_clause_p || have_else_clause_p)
    ret = GS_ALL_DONE;
  else
    {
      /* Both arms are empty; replace the COND_EXPR with its predicate.  */
      expr = TREE_OPERAND (expr, 0);
      gimplify_stmt (&expr, pre_p);
    }

  *expr_p = NULL;
  return ret;
}
/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memcpy.  */

static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value,
    				gimple_seq *seq_p)
{
  tree dest, src, dest_addr, src_addr, fn, tmp;
  gimple call_stmt;

  dest = TREE_OPERAND (*expr_p, 0);
  src = TREE_OPERAND (*expr_p, 1);

  /* Take the addresses of both sides and gimplify them into valid
     call arguments, emitting any side effects into SEQ_P.  */
  src_addr = build_fold_addr_expr (src);
  gimplify_arg (&src_addr, seq_p, EXPR_LOCATION (*expr_p));
  dest_addr = build_fold_addr_expr (dest);
  gimplify_arg (&dest_addr, seq_p, EXPR_LOCATION (*expr_p));

  /* Build memcpy (&dest, &src, size).  */
  fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
  call_stmt = gimple_build_call (fn, 3, dest_addr, src_addr, size);

  if (want_value)
    {
      /* The caller needs the assignment's value: capture memcpy's
	 returned pointer in a temporary and hand back *tmp.  */
      tmp = create_tmp_var (TREE_TYPE (dest_addr), NULL);
      gimple_call_set_lhs (call_stmt, tmp);
      gimplify_seq_add_stmt (seq_p, call_stmt);
      *expr_p = build1 (INDIRECT_REF, TREE_TYPE (dest), tmp);
      return GS_ALL_DONE;
    }

  /* Value unused: just emit the call.  */
  gimplify_seq_add_stmt (seq_p, call_stmt);
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memset.  In this case we know that the RHS is
   a CONSTRUCTOR with an empty element list.  */

static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
    				gimple_seq *seq_p)
{
  tree rhs, dest, dest_addr, fn, tmp;
  gimple call_stmt;

  /* Assert our assumptions, to abort instead of producing wrong code
     silently if they are not met.  Beware that the RHS CONSTRUCTOR might
     not be immediately exposed.  */
  rhs = TREE_OPERAND (*expr_p, 1);
  if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
    rhs = TREE_OPERAND (rhs, 0);
  gcc_assert (TREE_CODE (rhs) == CONSTRUCTOR
	      && VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (rhs)));

  /* Gimplify the destination address into a valid call argument.  */
  dest = TREE_OPERAND (*expr_p, 0);
  dest_addr = build_fold_addr_expr (dest);
  gimplify_arg (&dest_addr, seq_p, EXPR_LOCATION (*expr_p));

  /* Build memset (&dest, 0, size).  */
  fn = implicit_built_in_decls[BUILT_IN_MEMSET];
  call_stmt = gimple_build_call (fn, 3, dest_addr, integer_zero_node, size);

  if (want_value)
    {
      /* The caller needs the assignment's value: capture memset's
	 returned pointer in a temporary and hand back *tmp.  */
      tmp = create_tmp_var (TREE_TYPE (dest_addr), NULL);
      gimple_call_set_lhs (call_stmt, tmp);
      gimplify_seq_add_stmt (seq_p, call_stmt);
      *expr_p = build1 (INDIRECT_REF, TREE_TYPE (dest), tmp);
      return GS_ALL_DONE;
    }

  /* Value unused: just emit the call.  */
  gimplify_seq_add_stmt (seq_p, call_stmt);
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* A subroutine of gimplify_init_ctor_preeval.  Called via walk_tree,
   determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
   assignment.  Returns non-null if we detect a potential overlap.  */

/* Context threaded through walk_tree into gimplify_init_ctor_preeval_1.  */
struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object.  */
  alias_set_type lhs_alias_set;
};
/* walk_tree callback for gimplify_init_ctor_preeval: return the
   offending subtree if *TP might overlap the LHS described by XDATA
   (a struct gimplify_init_ctor_preeval_data), or NULL if it cannot.  */
static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *data
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree t = *tp;

  /* If we find the base object, obviously we have overlap.  */
  if (data->lhs_base_decl == t)
    return t;

  /* If the constructor component is indirect, determine if we have a
     potential overlap with the lhs.  The only bits of information we
     have to go on at this point are addressability and alias sets.  */
  if (TREE_CODE (t) == INDIRECT_REF
      && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
      && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t)))
    return t;

  /* If the constructor component is a call, determine if it can hide a
     potential overlap with the lhs through an INDIRECT_REF like above.
     Any pointer parameter whose pointee type's alias set conflicts with
     the lhs could be used to reach the lhs from inside the callee.  */
  if (TREE_CODE (t) == CALL_EXPR)
    {
      tree type, fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (t)));

      for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type))
	if (POINTER_TYPE_P (TREE_VALUE (type))
	    && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
	    && alias_sets_conflict_p (data->lhs_alias_set,
				      get_alias_set
				        (TREE_TYPE (TREE_VALUE (type)))))
	  return t;
    }

  /* Types and decls have no interesting subtrees here; prune the walk.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL;
}
/* A subroutine of gimplify_init_constructor.  Pre-evaluate EXPR,
   force values that overlap with the lhs (as described by *DATA)
   into temporaries.  Side-effect statements go to PRE_P/POST_P;
   *EXPR_P is updated in place (and set to NULL on gimplify error).  */

static void
gimplify_init_ctor_preeval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			    struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;

  /* If the value is constant, then there's nothing to pre-evaluate.  */
  if (TREE_CONSTANT (*expr_p))
    {
      /* Ensure it does not have side effects, it might contain a reference to
	 the object we're initializing.  */
      gcc_assert (!TREE_SIDE_EFFECTS (*expr_p));
      return;
    }

  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;

  /* Recurse for nested constructors.  */
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p);

      /* Pre-evaluate each element value independently.  */
      for (ix = 0; VEC_iterate (constructor_elt, v, ix, ce); ix++)
	gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);

      return;
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* Gimplify the constructor element to something appropriate for the rhs
     of a MODIFY_EXPR.  Given that we know the LHS is an aggregate, we know
     the gimplifier will consider this a store to memory.  Doing this
     gimplification now means that we won't have to deal with complicated
     language-specific trees, nor trees like SAVE_EXPR that can induce
     exponential search behavior.  */
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      *expr_p = NULL;
      return;
    }

  /* If we gimplified to a bare decl, we can be sure that it doesn't overlap
     with the lhs, since "a = { .x=a }" doesn't make sense.  This will
     always be true for all scalars, since is_gimple_mem_rhs insists on a
     temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;

  /* If this is of variable size, we have no choice but to assume it doesn't
     overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;

  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;

  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
/* A subroutine of gimplify_init_ctor_eval.  Create a loop for
   a RANGE_EXPR in a CONSTRUCTOR for an array.

      var = lower;
    loop_entry:
      object[var] = value;
      if (var == upper)
	goto loop_exit;
      var = var + 1;
      goto loop_entry;
    loop_exit:

   We increment var _after_ the loop exit check because we might otherwise
   fail if upper == TYPE_MAX_VALUE (type for upper).

   Note that we never have to deal with SAVE_EXPRs here, because this has
   already been taken care of for us, in gimplify_init_ctor_preeval().  */

static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *,
				     gimple_seq *, bool);

static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
			       tree value, tree array_elt_type,
			       gimple_seq *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label, fall_thru_label;
  tree var, var_type, cref, tmp;

  loop_entry_label = create_artificial_label ();
  loop_exit_label = create_artificial_label ();
  fall_thru_label = create_artificial_label ();

  /* Create and initialize the index variable.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type, NULL);
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, lower));

  /* Add the loop entry label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_entry_label));

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */

  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    gimplify_seq_add_stmt (pre_p, gimple_build_assign (cref, value));

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_seq_add_stmt (pre_p,
			 gimple_build_cond (EQ_EXPR, var, upper,
					    loop_exit_label, fall_thru_label));

  gimplify_seq_add_stmt (pre_p, gimple_build_label (fall_thru_label));

  /* Otherwise, increment the index var...  */
  tmp = build2 (PLUS_EXPR, var_type, var,
		fold_convert (var_type, integer_one_node));
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, tmp));

  /* ...and jump back to the loop entry.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_entry_label));

  /* Add the loop exit label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_exit_label));
}
/* Return true if FDECL is accessing a field that is zero sized. */
static bool
zero_sized_field_decl (const_tree fdecl)
{
  /* A field is zero sized when it is a FIELD_DECL whose DECL_SIZE is
     present and folds to the integer constant zero.  */
  return (TREE_CODE (fdecl) == FIELD_DECL
	  && DECL_SIZE (fdecl)
	  && integer_zerop (DECL_SIZE (fdecl)));
}
/* Return true if TYPE is zero sized. */
static bool
zero_sized_type (const_tree type)
{
  /* Only aggregates can be zero sized; require a known TYPE_SIZE that
     folds to the integer constant zero.  */
  return (AGGREGATE_TYPE_P (type)
	  && TYPE_SIZE (type)
	  && integer_zerop (TYPE_SIZE (type)));
}
/* A subroutine of gimplify_init_constructor. Generate individual
MODIFY_EXPRs for a CONSTRUCTOR. OBJECT is the LHS against which the
assignments should happen. ELTS is the CONSTRUCTOR_ELTS of the
CONSTRUCTOR. CLEARED is true if the entire LHS object has been
zeroed first. */
static void
gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
			 gimple_seq *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  /* For an array LHS, cache the element type used to build the
     ARRAY_REFs below.  */
  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;

      /* Storing zero is redundant when the whole object was already
	 block-cleared.  */
      if (cleared && initializer_zerop (value))
	continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);

	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.  */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}

      if (array_elt_type)
	{
	  /* Do not use bitsizetype for ARRAY_REF indices.  */
	  if (TYPE_DOMAIN (TREE_TYPE (object)))
	    purpose = fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))),
				    purpose);
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}

      /* Nested aggregate constructors are expanded recursively; vector
	 constructors are kept whole, since CONSTRUCTOR is the general
	 vector initializer all the way through GIMPLE.  */
      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  /* Emit "cref = value" and free the now-dead INIT_EXPR shell
	     (its operands live on in the emitted statement).  */
	  tree init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	  ggc_free (init);
	}
    }
}
/* Returns the appropriate RHS predicate for this LHS. */
gimple_predicate
rhs_predicate_for (tree lhs)
{
  /* Formal temporaries accept the widest class of RHS; other registers
     and memory each get the matching "or call" predicate.  */
  if (is_gimple_formal_tmp_var (lhs))
    return is_gimple_formal_tmp_or_call_rhs;

  return (is_gimple_reg (lhs)
	  ? is_gimple_reg_or_call_rhs
	  : is_gimple_mem_or_call_rhs);
}
/* A subroutine of gimplify_modify_expr. Break out elements of a
CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.
Note that we still need to clear any elements that don't have explicit
initializers, so if not all elements are initialized we keep the
original MODIFY_EXPR, we just remove all of the constructor elements.
If NOTIFY_TEMP_CREATION is true, do not gimplify, just return
GS_ERROR if we would have to create a temporary when gimplifying
this constructor. Otherwise, return GS_OK.
If NOTIFY_TEMP_CREATION is false, just do the gimplification. */
static enum gimplify_status
gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			   bool want_value, bool notify_temp_creation)
{
  tree object;
  tree ctor = TREE_OPERAND (*expr_p, 1);
  tree type = TREE_TYPE (ctor);
  enum gimplify_status ret;
  VEC(constructor_elt,gc) *elts;

  if (TREE_CODE (ctor) != CONSTRUCTOR)
    return GS_UNHANDLED;

  /* Gimplify the LHS only when actually gimplifying; in query mode
     (notify_temp_creation) we must not emit anything.  */
  if (!notify_temp_creation)
    {
      ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			   is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  object = TREE_OPERAND (*expr_p, 0);
  elts = CONSTRUCTOR_ELTS (ctor);
  ret = GS_ALL_DONE;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_type_elements, num_ctor_elements;
	HOST_WIDE_INT num_nonzero_elements;
	bool cleared, valid_const_initializer;

	/* Aggregate types must lower constructors to initialization of
	   individual elements.  The exception is that a CONSTRUCTOR node
	   with no elements indicates zero-initialization of the whole.  */
	if (VEC_empty (constructor_elt, elts))
	  {
	    if (notify_temp_creation)
	      return GS_OK;
	    break;
	  }

	/* Fetch information about the constructor to direct later processing.
	   We might want to make static versions of it in various cases, and
	   can only do so if it is known to be a valid constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_ctor_elements, &cleared);

	/* If a const aggregate variable is being initialized, then it
	   should never be a loss to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    && TREE_CODE (object) == VAR_DECL
	    && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object)))
	  {
	    /* Promotion would create a static, i.e. put the ctor in
	       memory, so report that in query mode.  */
	    if (notify_temp_creation)
	      return GS_ERROR;
	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks at FE-private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which aren't
	       "local" in the object file sense.  So in order to get a unique
	       TU-local symbol, we must invoke the lhd version now.  */
	    lhd_set_decl_assembler_name (object);

	    *expr_p = NULL_TREE;
	    break;
	  }

	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.  */
	/* TODO.  There's code in cp/typeck.c to do this.  */

	num_type_elements = count_type_elements (type, true);

	/* If count_type_elements could not determine number of type elements
	   for a constant-sized object, assume clearing is needed.
	   Don't do this for variable-sized objects, as store_constructor
	   will ignore the clearing of variable-sized objects.  */
	if (num_type_elements < 0 && int_size_in_bytes (type) >= 0)
	  cleared = true;
	/* If there are "lots" of zeros, then block clear the object first.  */
	else if (num_type_elements - num_nonzero_elements
		 > CLEAR_RATIO (optimize_function_for_speed_p (cfun))
		 && num_nonzero_elements < num_type_elements/4)
	  cleared = true;
	/* ??? This bit ought not be needed.  For any element not present
	   in the initializer, we should simply set them to zero.  Except
	   we'd need to *find* the elements that are not present, and that
	   requires trickery to avoid quadratic compile-time behavior in
	   large cases or excessive memory use in small cases.  */
	else if (num_ctor_elements < num_type_elements)
	  cleared = true;

	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  Also don't do this for small
	   all-zero initializers (which aren't big enough to merit
	   clearing), and don't try to make bitwise copies of
	   TREE_ADDRESSABLE types.  */
	if (valid_const_initializer
	    && !(cleared || num_nonzero_elements == 0)
	    && !TREE_ADDRESSABLE (type))
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;

	    /* ??? We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }

	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.  */
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);

	    /* Only spill to a static when a block move would actually be
	       emitted (i.e. the object cannot be moved by pieces).  */
	    if (size > 0
		&& num_nonzero_elements > 1
		&& !can_move_by_pieces (size, align))
	      {
		tree new_tree;

		if (notify_temp_creation)
		  return GS_ERROR;

		new_tree = create_tmp_var_raw (type, "C");

		gimple_add_tmp_var (new_tree);
		TREE_STATIC (new_tree) = 1;
		TREE_READONLY (new_tree) = 1;
		DECL_INITIAL (new_tree) = ctor;
		if (align > DECL_ALIGN (new_tree))
		  {
		    DECL_ALIGN (new_tree) = align;
		    DECL_USER_ALIGN (new_tree) = 1;
		  }
	        walk_tree (&DECL_INITIAL (new_tree), force_labels_r, NULL, NULL);

		TREE_OPERAND (*expr_p, 1) = new_tree;

		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }

	/* If the target is volatile, we have non-zero elements and more than
	   one field to assign, initialize the target from a temporary.  */
	if (TREE_THIS_VOLATILE (object)
	    && !TREE_ADDRESSABLE (type)
	    && num_nonzero_elements > 0
	    && VEC_length (constructor_elt, elts) > 1)
	  {
	    /* Build the ctor into TEMP, then "object = temp" as a single
	       (volatile) store, via a COMPOUND_EXPR to be regimplified.  */
	    tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type), NULL);
	    TREE_OPERAND (*expr_p, 0) = temp;
	    *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
			      *expr_p,
			      build2 (MODIFY_EXPR, void_type_node,
				      object, temp));
	    return GS_OK;
	  }

	if (notify_temp_creation)
	  return GS_OK;

	/* If there are nonzero elements, pre-evaluate to capture elements
	   overlapping with the lhs into temporaries.  We must do this before
	   clearing to fetch the values before they are zeroed-out.  */
	if (num_nonzero_elements > 0)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);

	    gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }

	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    TREE_SIDE_EFFECTS (ctor) = 0;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p, pre_p);
	  }

	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, add assignments to the individual
	   scalar fields of the object.  */
	if (!cleared || num_nonzero_elements > 0)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);

	*expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
	tree r, i;

	if (notify_temp_creation)
	  return GS_OK;

	/* Extract the real and imaginary parts out of the ctor.  */
	gcc_assert (VEC_length (constructor_elt, elts) == 2);
	r = VEC_index (constructor_elt, elts, 0)->value;
	i = VEC_index (constructor_elt, elts, 1)->value;
	if (r == NULL || i == NULL)
	  {
	    /* Missing parts default to zero of the element type.  */
	    tree zero = fold_convert (TREE_TYPE (type), integer_zero_node);
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }

	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
				 pre_p,
				 post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;

	if (notify_temp_creation)
	  return GS_OK;

	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;

	    /* Even when ctor is constant, it might contain non-*_CST
	       elements, such as addresses or trapping values like
	       1.0/0.0 - 1.0/0.0.  Such expressions don't belong
	       in VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}

	    if (constant_p)
	      {
		TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
		break;
	      }

	    /* Don't reduce an initializer constant even if we can't
	       make a VECTOR_CST.  It won't do anything for us, and it'll
	       prevent us from representing it as a single constant.  */
	    if (initializer_constant_valid_p (ctor, type))
	      break;

	    TREE_CONSTANT (ctor) = 0;
	  }

	/* Vector types use CONSTRUCTOR all the way through gimple
	  compilation as a general initializer.  */
	for (ix = 0; VEC_iterate (constructor_elt, elts, ix, ce); ix++)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p, is_gimple_val,
				  fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	  }

	/* A memory LHS cannot take the CONSTRUCTOR directly; force the
	   value through a formal temporary.  */
	if (!is_gimple_reg (TREE_OPERAND (*expr_p, 0)))
	  TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p);
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  else if (want_value)
    {
      *expr_p = object;
      return GS_OK;
    }
  else
    {
      /* If we have gimplified both sides of the initializer but have
	 not emitted an assignment, do so now.  */
      if (*expr_p)
	{
	  tree lhs = TREE_OPERAND (*expr_p, 0);
	  tree rhs = TREE_OPERAND (*expr_p, 1);
	  gimple init = gimple_build_assign (lhs, rhs);
	  gimplify_seq_add_stmt (pre_p, init);
	  *expr_p = NULL;
	}

      return GS_ALL_DONE;
    }
}
/* Given a pointer value OP0, return a simplified version of an
indirection through OP0, or NULL_TREE if no simplification is
possible. Note that the resulting type may be different from
the type pointed to in the sense that it is still compatible
from the langhooks point of view. */
tree
gimple_fold_indirect_ref (tree t)
{
  tree type = TREE_TYPE (TREE_TYPE (t));
  tree sub = t;
  tree subtype;

  /* Look through conversions that don't change semantics before
     examining the pointer expression.  */
  STRIP_USELESS_TYPE_CONVERSION (sub);
  subtype = TREE_TYPE (sub);
  if (!POINTER_TYPE_P (subtype))
    return NULL_TREE;

  if (TREE_CODE (sub) == ADDR_EXPR)
    {
      tree op = TREE_OPERAND (sub, 0);
      tree optype = TREE_TYPE (op);
      /* *&p => p */
      if (useless_type_conversion_p (type, optype))
	return op;

      /* *(foo *)&fooarray => fooarray[0] */
      if (TREE_CODE (optype) == ARRAY_TYPE
	  && TREE_CODE (TYPE_SIZE (TREE_TYPE (optype))) == INTEGER_CST
	  && useless_type_conversion_p (type, TREE_TYPE (optype)))
       {
	 tree type_domain = TYPE_DOMAIN (optype);
	 tree min_val = size_zero_node;
	 /* Index with the array's lower bound, which need not be zero.  */
	 if (type_domain && TYPE_MIN_VALUE (type_domain))
	   min_val = TYPE_MIN_VALUE (type_domain);
	 /* Only fold when the lower bound is a literal constant.  */
	 if (TREE_CODE (min_val) == INTEGER_CST)
	   return build4 (ARRAY_REF, type, op, min_val, NULL_TREE, NULL_TREE);
       }
    }

  /* *(foo *)fooarrptr => (*fooarrptr)[0] */
  if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
      && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (subtype)))) == INTEGER_CST
      && useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (subtype))))
    {
      tree type_domain;
      tree min_val = size_zero_node;
      tree osub = sub;
      /* Try to simplify the inner indirection first; fall back to an
	 explicit INDIRECT_REF of the original pointer.  */
      sub = gimple_fold_indirect_ref (sub);
      if (! sub)
	sub = build1 (INDIRECT_REF, TREE_TYPE (subtype), osub);
      type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
      if (type_domain && TYPE_MIN_VALUE (type_domain))
	min_val = TYPE_MIN_VALUE (type_domain);
      if (TREE_CODE (min_val) == INTEGER_CST)
	return build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE);
    }

  return NULL_TREE;
}
/* Given a pointer value OP0, return a simplified version of an
indirection through OP0, or NULL_TREE if no simplification is
possible. This may only be applied to a rhs of an expression.
Note that the resulting type may be different from the type pointed
to in the sense that it is still compatible from the langhooks
point of view. */
static tree
gimple_fold_indirect_ref_rhs (tree t)
{
  /* The RHS case currently imposes no extra restrictions; defer
     entirely to the generic folder.  */
  tree folded = gimple_fold_indirect_ref (t);
  return folded;
}
/* Subroutine of gimplify_modify_expr to do simplifications of
MODIFY_EXPRs based on the code of the RHS. We loop for as long as
something changes. */
static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
			  gimple_seq *pre_p, gimple_seq *post_p,
			  bool want_value)
{
  enum gimplify_status ret = GS_OK;

  /* Keep re-dispatching on the (possibly replaced) RHS until no case
     makes further progress.  */
  while (ret != GS_UNHANDLED)
    switch (TREE_CODE (*from_p))
      {
      case VAR_DECL:
	/* If we're assigning from a read-only variable initialized with
	   a constructor, do the direct assignment from the constructor,
	   but only if neither source nor target are volatile since this
	   latter assignment might end up being done on a per-field basis.  */
	if (DECL_INITIAL (*from_p)
	    && TREE_READONLY (*from_p)
	    && !TREE_THIS_VOLATILE (*from_p)
	    && !TREE_THIS_VOLATILE (*to_p)
	    && TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR)
	  {
	    tree old_from = *from_p;

	    /* Move the constructor into the RHS.  */
	    *from_p = unshare_expr (DECL_INITIAL (*from_p));

	    /* Let's see if gimplify_init_constructor will need to put
	       it in memory.  If so, revert the change.  */
	    ret = gimplify_init_constructor (expr_p, NULL, NULL, false, true);
	    if (ret == GS_ERROR)
	      {
		*from_p = old_from;
		/* Fall through.  */
	      }
	    else
	      {
		ret = GS_OK;
		break;
	      }
	  }
	ret = GS_UNHANDLED;
	break;
      case INDIRECT_REF:
	{
	  /* If we have code like

	     *(const A*)(A*)&x

	     where the type of "x" is a (possibly cv-qualified variant
	     of "A"), treat the entire expression as identical to "x".
	     This kind of code arises in C++ when an object is bound
	     to a const reference, and if "x" is a TARGET_EXPR we want
	     to take advantage of the optimization below.  */
	  tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
	  if (t)
	    {
	      *from_p = t;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	  break;
	}

      case TARGET_EXPR:
	{
	  /* If we are initializing something from a TARGET_EXPR, strip the
	     TARGET_EXPR and initialize it directly, if possible.  This can't
	     be done if the initializer is void, since that implies that the
	     temporary is set in some non-trivial way.

	     ??? What about code that pulls out the temp and uses it
	     elsewhere? I think that such code never uses the TARGET_EXPR as
	     an initializer.  If I'm wrong, we'll die because the temp won't
	     have any RTL.  In that case, I guess we'll need to replace
	     references somehow.  */
	  tree init = TARGET_EXPR_INITIAL (*from_p);

	  if (init
	      && !VOID_TYPE_P (TREE_TYPE (init)))
	    {
	      *from_p = init;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	}
	break;

      case COMPOUND_EXPR:
	/* Remove any COMPOUND_EXPR in the RHS so the following cases will be
	   caught.  */
	gimplify_compound_expr (from_p, pre_p, true);
	ret = GS_OK;
	break;

      case CONSTRUCTOR:
	/* If we're initializing from a CONSTRUCTOR, break this into
	   individual MODIFY_EXPRs.  */
	return gimplify_init_constructor (expr_p, pre_p, post_p, want_value,
					  false);

      case COND_EXPR:
	/* If we're assigning to a non-register type, push the assignment
	   down into the branches.  This is mandatory for ADDRESSABLE types,
	   since we cannot generate temporaries for such, but it saves a
	   copy in other cases as well.  */
	if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
	  {
	    /* This code should mirror the code in gimplify_cond_expr. */
	    enum tree_code code = TREE_CODE (*expr_p);
	    tree cond = *from_p;
	    tree result = *to_p;

	    ret = gimplify_expr (&result, pre_p, post_p,
				 is_gimple_lvalue, fb_lvalue);
	    if (ret != GS_ERROR)
	      ret = GS_OK;

	    /* Rewrite each non-void arm as "result = arm" (the second
	       arm uses an unshared copy of RESULT).  */
	    if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
	      TREE_OPERAND (cond, 1)
		= build2 (code, void_type_node, result,
			  TREE_OPERAND (cond, 1));
	    if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
	      TREE_OPERAND (cond, 2)
		= build2 (code, void_type_node, unshare_expr (result),
			  TREE_OPERAND (cond, 2));

	    TREE_TYPE (cond) = void_type_node;
	    recalculate_side_effects (cond);

	    if (want_value)
	      {
		gimplify_and_add (cond, pre_p);
		*expr_p = unshare_expr (result);
	      }
	    else
	      *expr_p = cond;
	    return ret;
	  }
	else
	  ret = GS_UNHANDLED;
	break;

      case CALL_EXPR:
	/* For calls that return in memory, give *to_p as the CALL_EXPR's
	   return slot so that we don't generate a temporary.  */
	if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
	    && aggregate_value_p (*from_p, *from_p))
	  {
	    bool use_target;

	    if (!(rhs_predicate_for (*to_p))(*from_p))
	      /* If we need a temporary, *to_p isn't accurate.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == RESULT_DECL
		     && DECL_NAME (*to_p) == NULL_TREE
		     && needs_to_live_in_memory (*to_p))
	      /* It's OK to use the return slot directly unless it's an NRV. */
	      use_target = true;
	    else if (is_gimple_reg_type (TREE_TYPE (*to_p))
		     || (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
	      /* Don't force regs into memory.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == VAR_DECL
		     && DECL_GIMPLE_FORMAL_TEMP_P (*to_p))
	      /* Don't use the original target if it's a formal temp; we
		 don't want to take their addresses.  */
	      use_target = false;
	    else if (TREE_CODE (*expr_p) == INIT_EXPR)
	      /* It's OK to use the target directly if it's being
		 initialized. */
	      use_target = true;
	    else if (!is_gimple_non_addressable (*to_p))
	      /* Don't use the original target if it's already addressable;
		 if its address escapes, and the called function uses the
		 NRV optimization, a conforming program could see *to_p
		 change before the called function returns; see c++/19317.
		 When optimizing, the return_slot pass marks more functions
		 as safe after we have escape info.  */
	      use_target = false;
	    else
	      use_target = true;

	    if (use_target)
	      {
		CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
		mark_addressable (*to_p);
	      }
	  }

	ret = GS_UNHANDLED;
	break;

	/* If we're initializing from a container, push the initialization
	   inside it.  */
      case CLEANUP_POINT_EXPR:
      case BIND_EXPR:
      case STATEMENT_LIST:
	{
	  tree wrap = *from_p;
	  tree t;

	  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
			       fb_lvalue);
	  if (ret != GS_ERROR)
	    ret = GS_OK;

	  /* voidify_wrapper_expr pushes the assignment to the wrapper's
	     last value; the whole MODIFY_EXPR must have been absorbed.  */
	  t = voidify_wrapper_expr (wrap, *expr_p);
	  gcc_assert (t == *expr_p);

	  if (want_value)
	    {
	      gimplify_and_add (wrap, pre_p);
	      *expr_p = unshare_expr (*to_p);
	    }
	  else
	    *expr_p = wrap;
	  return GS_OK;
	}

      default:
	ret = GS_UNHANDLED;
	break;
      }

  return ret;
}
/* Promote partial stores to COMPLEX variables to total stores. *EXPR_P is
a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
DECL_GIMPLE_REG_P set.
IMPORTANT NOTE: This promotion is performed by introducing a load of the
other, unmodified part of the complex object just before the total store.
As a consequence, if the object is still uninitialized, an undefined value
will be loaded into a register, which may result in a spurious exception
if the register is floating-point and the value happens to be a signaling
NaN for example. Then the fully-fledged complex operations lowering pass
followed by a DCE pass are necessary in order to fix things up. */
static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, gimple_seq *pre_p,
				   bool want_value)
{
  tree part_ref, new_part, complex_var, old_part, re, im, whole;
  bool storing_real;

  /* The LHS is a REAL/IMAGPART_EXPR of COMPLEX_VAR; the RHS is the new
     value for that single part.  */
  part_ref = TREE_OPERAND (*expr_p, 0);
  new_part = TREE_OPERAND (*expr_p, 1);
  storing_real = (TREE_CODE (part_ref) == REALPART_EXPR);
  complex_var = TREE_OPERAND (part_ref, 0);

  /* Load the part we are NOT storing into a formal temporary (this is
     the load that makes the store total; see the NaN caveat above).  */
  old_part = build1 (storing_real ? IMAGPART_EXPR : REALPART_EXPR,
		     TREE_TYPE (new_part), complex_var);
  old_part = get_formal_tmp_var (old_part, pre_p);

  re = storing_real ? new_part : old_part;
  im = storing_real ? old_part : new_part;

  /* Recombine into a full complex value — a constant node when both
     parts are constant — and emit the total store.  */
  if (TREE_CONSTANT (re) && TREE_CONSTANT (im))
    whole = build_complex (TREE_TYPE (complex_var), re, im);
  else
    whole = build2 (COMPLEX_EXPR, TREE_TYPE (complex_var), re, im);

  gimplify_seq_add_stmt (pre_p, gimple_build_assign (complex_var, whole));

  /* The value of the original expression is the stored part.  */
  *expr_p = want_value ? new_part : NULL_TREE;

  return GS_ALL_DONE;
}
/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.
modify_expr
: varname '=' rhs
| '*' ID '=' rhs
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored.
WANT_VALUE is nonzero iff we want to use the value of this expression
in another expression. */
static enum gimplify_status
gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
		      bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;
  gimple assign;

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
	      || TREE_CODE (*expr_p) == INIT_EXPR);

  /* Insert pointer conversions required by the middle-end that are not
     required by the frontend.  This fixes middle-end type checking for
     for example gcc.dg/redecl-6.c.  */
  if (POINTER_TYPE_P (TREE_TYPE (*to_p))
      && lang_hooks.types_compatible_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p)))
    {
      STRIP_USELESS_TYPE_CONVERSION (*from_p);
      if (!useless_type_conversion_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p)))
	*from_p = fold_convert (TREE_TYPE (*to_p), *from_p);
    }

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  Do this after
     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
     types properly.  */
  if (zero_sized_type (TREE_TYPE (*from_p)) && !want_value)
    {
      gimplify_stmt (from_p, pre_p);
      gimplify_stmt (to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must do here.  */
  maybe_with_size_expr (from_p);

  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* As a special case, we have to temporarily allow for assignments
     with a CALL_EXPR on the RHS.  Since in GIMPLE a function call is
     a toplevel statement, when gimplifying the GENERIC expression
     MODIFY_EXPR <a, CALL_EXPR <foo>>, we cannot create the tuple
     GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>.

     Instead, we need to create the tuple GIMPLE_CALL <a, foo>.  To
     prevent gimplify_expr from trying to create a new temporary for
     foo's LHS, we tell it that it should only gimplify until it
     reaches the CALL_EXPR.  On return from gimplify_expr, the newly
     created GIMPLE_CALL <foo> will be the last statement in *PRE_P
     and all we need to do here is set 'a' to be its LHS.  */
  ret = gimplify_expr (from_p, pre_p, post_p, rhs_predicate_for (*to_p),
		       fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Now see if the above changed *from_p to something we handle specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
	return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p);

      if (is_gimple_addressable (from))
	{
	  *from_p = from;
	  return gimplify_modify_expr_to_memcpy (expr_p, size, want_value,
						 pre_p);
	}
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  /* Try to alleviate the effects of the gimplification creating artificial
     temporaries (see for example is_gimple_reg_rhs) on the debug info.  */
  if (!gimplify_ctxp->into_ssa
      && DECL_P (*from_p)
      && DECL_IGNORED_P (*from_p)
      && DECL_P (*to_p)
      && !DECL_IGNORED_P (*to_p))
    {
      /* Borrow the destination's name for the artificial temporary and
	 record the temp as standing in for the user variable.  */
      if (!DECL_NAME (*from_p) && DECL_NAME (*to_p))
	DECL_NAME (*from_p)
	  = create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p)));
      DECL_DEBUG_EXPR_IS_FROM (*from_p) = 1;
      SET_DECL_DEBUG_EXPR (*from_p, *to_p);
    }

  if (TREE_CODE (*from_p) == CALL_EXPR)
    {
      /* Since the RHS is a CALL_EXPR, we need to create a GIMPLE_CALL
	 instead of a GIMPLE_ASSIGN.  */
      assign = gimple_build_call_from_tree (*from_p);
      gimple_call_set_lhs (assign, *to_p);
    }
  else
    assign = gimple_build_assign (*to_p, *from_p);

  gimplify_seq_add_stmt (pre_p, assign);

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* If we've somehow already got an SSA_NAME on the LHS, then
	 we've probably modified it twice.  Not good.  */
      gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
      *to_p = make_ssa_name (*to_p, assign);
      gimple_set_lhs (assign, *to_p);
    }

  if (want_value)
    {
      *expr_p = unshare_expr (*to_p);
      return GS_OK;
    }
  else
    *expr_p = NULL;
  return GS_ALL_DONE;
}
/* Gimplify a comparison between two variable-sized objects. Do this
with a call to BUILT_IN_MEMCMP. */
static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  tree lhs_op = TREE_OPERAND (*expr_p, 0);
  tree rhs_op = TREE_OPERAND (*expr_p, 1);
  tree len, lhs_addr, rhs_addr, memcmp_call;

  /* Number of bytes to compare: the size of the first operand, with
     any PLACEHOLDER_EXPRs resolved against that operand.  */
  len = unshare_expr (TYPE_SIZE_UNIT (TREE_TYPE (lhs_op)));
  len = SUBSTITUTE_PLACEHOLDER_IN_EXPR (len, lhs_op);

  lhs_addr = build_fold_addr_expr (lhs_op);
  rhs_addr = build_fold_addr_expr (rhs_op);

  /* Rewrite "a OP b" as "memcmp (&a, &b, len) OP 0".  */
  memcmp_call = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCMP],
				 3, lhs_addr, rhs_addr, len);
  *expr_p = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p),
		    memcmp_call, integer_zero_node);

  return GS_OK;
}
/* Gimplify a comparison between two aggregate objects of integral scalar
mode as a comparison between the bitwise equivalent scalar values. */
static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  tree lhs_op = TREE_OPERAND (*expr_p, 0);
  tree rhs_op = TREE_OPERAND (*expr_p, 1);
  tree scalar_type;

  /* Pick an unsigned integral type with the same machine mode as the
     aggregates, so the comparison happens on the raw bit patterns.  */
  scalar_type
    = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (lhs_op)), 1);

  /* Reinterpret both operands as that scalar type and rebuild the
     comparison on the converted values.  */
  *expr_p
    = fold_build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p),
		   fold_build1 (VIEW_CONVERT_EXPR, scalar_type, lhs_op),
		   fold_build1 (VIEW_CONVERT_EXPR, scalar_type, rhs_op));

  return GS_OK;
}
/* Gimplify TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR expressions.  EXPR_P
   points to the expression to gimplify.
   Expressions of the form 'a && b' are gimplified to:
	a && b ? true : false
   gimplify_cond_expr will do the rest.
   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  */
static enum gimplify_status
gimplify_boolean_expr (tree *expr_p)
{
  /* Preserve the original type of the expression.  */
  tree type = TREE_TYPE (*expr_p);
  tree true_arm = fold_convert (type, boolean_true_node);
  tree false_arm = fold_convert (type, boolean_false_node);
  /* Wrap the short-circuit expression in a COND_EXPR selecting between
     the two constants; gimplify_cond_expr finishes the job.  */
  *expr_p = build3 (COND_EXPR, type, *expr_p, true_arm, false_arm);
  return GS_OK;
}
/* Gimplifies an expression sequence.  This function gimplifies each
   expression and re-writes the original expression with the last
   expression of the sequence in GIMPLE form.
   PRE_P points to the list where the side effects for all the
   expressions in the sequence will be emitted.
   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.  */
static enum gimplify_status
gimplify_compound_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree expr = *expr_p;
  /* Walk down the chain of COMPOUND_EXPRs, emitting every left-hand
     operand as a statement.  Nested COMPOUND_EXPRs on the left are
     flattened recursively with WANT_VALUE false, since only the final
     right-hand expression can carry the value.  */
  for (;;)
    {
      tree *first_p = &TREE_OPERAND (expr, 0);
      if (TREE_CODE (*first_p) == COMPOUND_EXPR)
	gimplify_compound_expr (first_p, pre_p, false);
      else
	gimplify_stmt (first_p, pre_p);
      expr = TREE_OPERAND (expr, 1);
      if (TREE_CODE (expr) != COMPOUND_EXPR)
	break;
    }
  /* EXPR is now the last expression of the sequence.  */
  *expr_p = expr;
  if (want_value)
    return GS_OK;
  /* The value is unused: emit the trailing expression as a statement
     too, leaving nothing behind.  */
  gimplify_stmt (expr_p, pre_p);
  return GS_ALL_DONE;
}
/* Gimplify a SAVE_EXPR node. EXPR_P points to the expression to
gimplify. After gimplification, EXPR_P will point to a new temporary
that holds the original value of the SAVE_EXPR node.
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_save_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
enum gimplify_status ret = GS_ALL_DONE;
tree val;
gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
val = TREE_OPERAND (*expr_p, 0);
/* If the SAVE_EXPR has not been resolved, then evaluate it once. */
if (!SAVE_EXPR_RESOLVED_P (*expr_p))
{
/* The operand may be a void-valued expression such as SAVE_EXPRs
generated by the Java frontend for class initialization. It is
being executed only for its side-effects. */
if (TREE_TYPE (val) == void_type_node)
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_stmt, fb_none);
/* No value to cache for a void operand.  */
val = NULL;
}
else
/* Evaluate the operand into a temporary; later visits of this
SAVE_EXPR reuse that temporary instead of re-evaluating.  */
val = get_initialized_tmp_var (val, pre_p, post_p);
/* Record the cached value and mark the node resolved so the next
visit skips this whole branch.  */
TREE_OPERAND (*expr_p, 0) = val;
SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
}
/* Replace the SAVE_EXPR with its cached value (NULL in the void case,
making the expression disappear).  */
*expr_p = val;
return ret;
}
/* Re-write the ADDR_EXPR node pointed to by EXPR_P
unary_expr
: ...
| '&' varname
...
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree expr = *expr_p;
tree op0 = TREE_OPERAND (expr, 0);
enum gimplify_status ret;
/* Dispatch on the tree code of the operand being addressed.  */
switch (TREE_CODE (op0))
{
case INDIRECT_REF:
case MISALIGNED_INDIRECT_REF:
do_indirect_ref:
/* Check if we are dealing with an expression of the form '&*ptr'.
While the front end folds away '&*ptr' into 'ptr', these
expressions may be generated internally by the compiler (e.g.,
builtins like __builtin_va_end). */
/* Caution: the silent array decomposition semantics we allow for
ADDR_EXPR means we can't always discard the pair. */
/* Gimplification of the ADDR_EXPR operand may drop
cv-qualification conversions, so make sure we add them if
needed. */
{
tree op00 = TREE_OPERAND (op0, 0);
tree t_expr = TREE_TYPE (expr);
tree t_op00 = TREE_TYPE (op00);
/* '&*ptr' collapses to 'ptr', converted if the types differ.  */
if (!useless_type_conversion_p (t_expr, t_op00))
op00 = fold_convert (TREE_TYPE (expr), op00);
*expr_p = op00;
ret = GS_OK;
}
break;
case VIEW_CONVERT_EXPR:
/* Take the address of our operand and then convert it to the type of
this ADDR_EXPR.
??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
all clear. The impact of this transformation is even less clear. */
/* If the operand is a useless conversion, look through it. Doing so
guarantees that the ADDR_EXPR and its operand will remain of the
same type. */
if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
op0 = TREE_OPERAND (op0, 0);
/* '&VIEW_CONVERT_EXPR<T>(x)' becomes '(type-of-ADDR_EXPR) &x'.  */
*expr_p = fold_convert (TREE_TYPE (expr),
build_fold_addr_expr (TREE_OPERAND (op0, 0)));
ret = GS_OK;
break;
default:
/* We use fb_either here because the C frontend sometimes takes
the address of a call that returns a struct; see
gcc.dg/c99-array-lval-1.c. The gimplifier will correctly make
the implied temporary explicit. */
/* Mark the RHS addressable. */
ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
is_gimple_addressable, fb_either);
if (ret == GS_ERROR)
break;
/* We cannot rely on making the RHS addressable if it is
a temporary created by gimplification. In this case create a
new temporary that is initialized by a copy (which will
become a store after we mark it addressable).
This mostly happens if the frontend passed us something that
it could not mark addressable yet, like a fortran
pass-by-reference parameter (int) floatvar. */
if (is_gimple_formal_tmp_var (TREE_OPERAND (expr, 0)))
TREE_OPERAND (expr, 0)
= get_initialized_tmp_var (TREE_OPERAND (expr, 0), pre_p, post_p);
op0 = TREE_OPERAND (expr, 0);
/* For various reasons, the gimplification of the expression
may have made a new INDIRECT_REF. */
if (TREE_CODE (op0) == INDIRECT_REF)
goto do_indirect_ref;
/* Make sure TREE_CONSTANT and TREE_SIDE_EFFECTS are set properly. */
recompute_tree_invariant_for_addr_expr (expr);
/* Taking the address pins the operand to memory.  */
mark_addressable (TREE_OPERAND (expr, 0));
break;
}
return ret;
}
/* Gimplify the operands of an ASM_EXPR. Input operands should be a gimple
value; output operands should be a gimple lvalue. */
static enum gimplify_status
gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree expr;
int noutputs;
const char **oconstraints;
int i;
tree link;
const char *constraint;
bool allows_mem, allows_reg, is_inout;
enum gimplify_status ret, tret;
gimple stmt;
VEC(tree, gc) *inputs;
VEC(tree, gc) *outputs;
VEC(tree, gc) *clobbers;
tree link_next;
expr = *expr_p;
noutputs = list_length (ASM_OUTPUTS (expr));
/* Output constraints are remembered so the input pass below can
resolve matching constraints against them.  */
oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));
inputs = outputs = clobbers = NULL;
ret = GS_ALL_DONE;
link_next = NULL_TREE;
/* First pass: gimplify each output operand into an lvalue and
collect it.  I counts operands for diagnostics and matching
constraints, and keeps counting through the input and clobber
loops below.  */
for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = link_next)
{
bool ok;
size_t constraint_len;
link_next = TREE_CHAIN (link);
oconstraints[i]
= constraint
= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
constraint_len = strlen (constraint);
if (constraint_len == 0)
continue;
ok = parse_output_constraint (&constraint, i, 0, 0,
&allows_mem, &allows_reg, &is_inout);
if (!ok)
{
ret = GS_ERROR;
is_inout = false;
}
if (!allows_reg && allows_mem)
mark_addressable (TREE_VALUE (link));
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_inout ? is_gimple_min_lval : is_gimple_lvalue,
fb_lvalue | fb_mayfail);
if (tret == GS_ERROR)
{
error ("invalid lvalue in asm output %d", i);
ret = tret;
}
/* Detach the operand from the TREE_LIST chain before pushing it,
since the vector now owns it.  */
VEC_safe_push (tree, gc, outputs, link);
TREE_CHAIN (link) = NULL_TREE;
if (is_inout)
{
/* An input/output operand. To give the optimizers more
flexibility, split it into separate input and output
operands. */
tree input;
char buf[10];
/* Turn the in/out constraint into an output constraint. */
char *p = xstrdup (constraint);
p[0] = '=';
TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);
/* And add a matching input constraint. */
if (allows_reg)
{
/* The matching input constraint is the output's operand
number in decimal.  */
sprintf (buf, "%d", i);
/* If there are multiple alternatives in the constraint,
handle each of them individually. Those that allow register
will be replaced with operand number, the others will stay
unchanged. */
if (strchr (p, ',') != NULL)
{
/* First pass over the alternatives: compute an upper
bound on the rewritten constraint's length.  */
size_t len = 0, buflen = strlen (buf);
char *beg, *end, *str, *dst;
for (beg = p + 1;;)
{
end = strchr (beg, ',');
if (end == NULL)
end = strchr (beg, '\0');
if ((size_t) (end - beg) < buflen)
len += buflen + 1;
else
len += end - beg + 1;
if (*end)
beg = end + 1;
else
break;
}
str = (char *) alloca (len);
/* Second pass: rewrite each alternative in place, replacing
register-capable ones with the operand number.  */
for (beg = p + 1, dst = str;;)
{
const char *tem;
bool mem_p, reg_p, inout_p;
end = strchr (beg, ',');
if (end)
*end = '\0';
/* Temporarily prefix '=' so the alternative parses as
an output constraint.  */
beg[-1] = '=';
tem = beg - 1;
parse_output_constraint (&tem, i, 0, 0,
&mem_p, &reg_p, &inout_p);
if (dst != str)
*dst++ = ',';
if (reg_p)
{
memcpy (dst, buf, buflen);
dst += buflen;
}
else
{
if (end)
len = end - beg;
else
len = strlen (beg);
memcpy (dst, beg, len);
dst += len;
}
if (end)
beg = end + 1;
else
break;
}
*dst = '\0';
input = build_string (dst - str, str);
}
else
input = build_string (strlen (buf), buf);
}
else
/* Memory-only operand: reuse the constraint text minus the
leading in/out marker.  */
input = build_string (constraint_len - 1, constraint + 1);
free (p);
/* Chain the synthesized input (constraint, expression) pair onto
the asm's input list so the pass below processes it.  */
input = build_tree_list (build_tree_list (NULL_TREE, input),
unshare_expr (TREE_VALUE (link)));
ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
}
}
link_next = NULL_TREE;
/* Second pass: gimplify the input operands (including those added by
in/out splitting above).  I continues counting from the outputs.  */
for (link = ASM_INPUTS (expr); link; ++i, link = link_next)
{
link_next = TREE_CHAIN (link);
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
parse_input_constraint (&constraint, 0, 0, noutputs, 0,
oconstraints, &allows_mem, &allows_reg);
/* If we can't make copies, we can only accept memory. */
if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link))))
{
if (allows_mem)
allows_reg = 0;
else
{
error ("impossible constraint in %<asm%>");
error ("non-memory input %d must stay in memory", i);
return GS_ERROR;
}
}
/* If the operand is a memory input, it should be an lvalue. */
if (!allows_reg && allows_mem)
{
tree inputv = TREE_VALUE (link);
STRIP_NOPS (inputv);
/* Pre/post increment/decrement of a memory input cannot be
made directly addressable; poison the operand.  */
if (TREE_CODE (inputv) == PREDECREMENT_EXPR
|| TREE_CODE (inputv) == PREINCREMENT_EXPR
|| TREE_CODE (inputv) == POSTDECREMENT_EXPR
|| TREE_CODE (inputv) == POSTINCREMENT_EXPR)
TREE_VALUE (link) = error_mark_node;
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_gimple_lvalue, fb_lvalue | fb_mayfail);
mark_addressable (TREE_VALUE (link));
if (tret == GS_ERROR)
{
if (EXPR_HAS_LOCATION (TREE_VALUE (link)))
input_location = EXPR_LOCATION (TREE_VALUE (link));
error ("memory input %d is not directly addressable", i);
ret = tret;
}
}
else
{
/* Register-capable input: reduce it to a gimple rvalue.  */
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_gimple_asm_val, fb_rvalue);
if (tret == GS_ERROR)
ret = tret;
}
TREE_CHAIN (link) = NULL_TREE;
VEC_safe_push (tree, gc, inputs, link);
}
/* Clobbers need no gimplification; just collect them.  */
for (link = ASM_CLOBBERS (expr); link; ++i, link = TREE_CHAIN (link))
VEC_safe_push (tree, gc, clobbers, link);
/* Build the GIMPLE_ASM from the collected operand vectors and carry
over the volatile and "simple asm" flags.  */
stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)),
inputs, outputs, clobbers);
gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr));
gimple_asm_set_input (stmt, ASM_INPUT_P (expr));
gimplify_seq_add_stmt (pre_p, stmt);
return ret;
}
/* Gimplify a CLEANUP_POINT_EXPR. Currently this works by adding
GIMPLE_WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
return to this function.
FIXME should we complexify the prequeue handling instead? Or use flags
for all the cleanups and let the optimizer tighten them up? The current
code seems pretty fragile; it will break on a cleanup within any
non-conditional nesting. But any such nesting would be broken, anyway;
we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
and continues out of it. We can do that at the RTL level, though, so
having an optimizer to tighten up try/finally regions would be a Good
Thing. */
static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
{
gimple_stmt_iterator iter;
gimple_seq body_sequence = NULL;
tree temp = voidify_wrapper_expr (*expr_p, NULL);
/* We only care about the number of conditions between the innermost
CLEANUP_POINT_EXPR and the cleanup. So save and reset the count and
any cleanups collected outside the CLEANUP_POINT_EXPR. */
int old_conds = gimplify_ctxp->conditions;
gimple_seq old_cleanups = gimplify_ctxp->conditional_cleanups;
gimplify_ctxp->conditions = 0;
gimplify_ctxp->conditional_cleanups = NULL;
/* Gimplify the body into its own sequence, collecting WCE markers.  */
gimplify_stmt (&TREE_OPERAND (*expr_p, 0), &body_sequence);
gimplify_ctxp->conditions = old_conds;
gimplify_ctxp->conditional_cleanups = old_cleanups;
/* Walk the body, rewriting each GIMPLE_WITH_CLEANUP_EXPR marker into
structured try/finally (or try/catch) form.  */
for (iter = gsi_start (body_sequence); !gsi_end_p (iter); )
{
gimple wce = gsi_stmt (iter);
if (gimple_code (wce) == GIMPLE_WITH_CLEANUP_EXPR)
{
if (gsi_one_before_end_p (iter))
{
/* The WCE is the last statement: nothing can throw after it,
so simply inline the cleanup in its place.  */
/* Note that gsi_insert_seq_before and gsi_remove do not
scan operands, unlike some other sequence mutators. */
gsi_insert_seq_before_without_update (&iter,
gimple_wce_cleanup (wce),
GSI_SAME_STMT);
gsi_remove (&iter, true);
break;
}
else
{
/* Otherwise wrap everything after the WCE in a GIMPLE_TRY
whose cleanup is the WCE's cleanup, then continue scanning
inside the protected body for further WCEs.  */
gimple gtry;
gimple_seq seq;
enum gimple_try_flags kind;
if (gimple_wce_cleanup_eh_only (wce))
kind = GIMPLE_TRY_CATCH;
else
kind = GIMPLE_TRY_FINALLY;
seq = gsi_split_seq_after (iter);
gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
/* Do not use gsi_replace here, as it may scan operands.
We want to do a simple structural modification only. */
*gsi_stmt_ptr (&iter) = gtry;
iter = gsi_start (seq);
}
}
else
gsi_next (&iter);
}
gimplify_seq_add_seq (pre_p, body_sequence);
/* TEMP is the temporary voidify_wrapper_expr introduced to carry the
expression's value, if any.  */
if (temp)
{
*expr_p = temp;
return GS_OK;
}
else
{
*expr_p = NULL;
return GS_ALL_DONE;
}
}
/* Insert a cleanup marker for gimplify_cleanup_point_expr. CLEANUP
is the cleanup action required. EH_ONLY is true if the cleanup should
only be executed if an exception is thrown, not on normal exit. */
static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, gimple_seq *pre_p)
{
gimple wce;
gimple_seq cleanup_stmts = NULL;
/* Errors can result in improperly nested cleanups. Which results in
confusion when trying to resolve the GIMPLE_WITH_CLEANUP_EXPR. */
if (errorcount || sorrycount)
return;
if (gimple_conditional_context ())
{
/* If we're in a conditional context, this is more complex. We only
want to run the cleanup if we actually ran the initialization that
necessitates it, but we want to run it after the end of the
conditional context. So we wrap the try/finally around the
condition and use a flag to determine whether or not to actually
run the destructor. Thus
test ? f(A()) : 0
becomes (approximately)
flag = 0;
try {
if (test) { A::A(temp); flag = 1; val = f(temp); }
else { val = 0; }
} finally {
if (flag) A::~A(temp);
}
val
*/
tree flag = create_tmp_var (boolean_type_node, "cleanup");
gimple ffalse = gimple_build_assign (flag, boolean_false_node);
gimple ftrue = gimple_build_assign (flag, boolean_true_node);
/* Guard the cleanup on the flag.  */
cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
gimplify_stmt (&cleanup, &cleanup_stmts);
wce = gimple_build_wce (cleanup_stmts);
/* 'flag = false' and the guarded WCE go on the conditional-cleanups
queue (emitted outside the condition); 'flag = true' is emitted
right here, on the path that runs the initialization.  */
gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, ffalse);
gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce);
gimplify_seq_add_stmt (pre_p, ftrue);
/* Because of this manipulation, and the EH edges that jump
threading cannot redirect, the temporary (VAR) will appear
to be used uninitialized. Don't warn. */
TREE_NO_WARNING (var) = 1;
}
else
{
/* Unconditional case: just queue the WCE marker carrying the
gimplified cleanup; EH_ONLY is recorded on the marker.  */
gimplify_stmt (&cleanup, &cleanup_stmts);
wce = gimple_build_wce (cleanup_stmts);
gimple_wce_set_cleanup_eh_only (wce, eh_only);
gimplify_seq_add_stmt (pre_p, wce);
}
}
/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR. */
static enum gimplify_status
gimplify_target_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree targ = *expr_p;
tree temp = TARGET_EXPR_SLOT (targ);
tree init = TARGET_EXPR_INITIAL (targ);
enum gimplify_status ret;
/* INIT is non-NULL only the first time this TARGET_EXPR is seen; the
tail of the function clears it so re-expansion is impossible.  */
if (init)
{
/* TARGET_EXPR temps aren't part of the enclosing block, so add it
to the temps list. Handle also variable length TARGET_EXPRs. */
if (TREE_CODE (DECL_SIZE (temp)) != INTEGER_CST)
{
if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp)))
gimplify_type_sizes (TREE_TYPE (temp), pre_p)
;
gimplify_vla_decl (temp, pre_p);
}
else
gimple_add_tmp_var (temp);
/* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
expression is supposed to initialize the slot. */
if (VOID_TYPE_P (TREE_TYPE (init)))
ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
else
{
/* Otherwise build an explicit 'temp = init' and gimplify that;
the INIT_EXPR wrapper itself is freed afterwards.  */
tree init_expr = build2 (INIT_EXPR, void_type_node, temp, init);
init = init_expr;
ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
init = NULL;
ggc_free (init_expr);
}
if (ret == GS_ERROR)
{
/* PR c++/28266 Make sure this is expanded only once. */
TARGET_EXPR_INITIAL (targ) = NULL_TREE;
return GS_ERROR;
}
/* Anything of INIT that wasn't fully gimplified above is emitted
here.  */
if (init)
gimplify_and_add (init, pre_p);
/* If needed, push the cleanup for the temp. */
if (TARGET_EXPR_CLEANUP (targ))
gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
CLEANUP_EH_ONLY (targ), pre_p);
/* Only expand this once. */
TREE_OPERAND (targ, 3) = init;
TARGET_EXPR_INITIAL (targ) = NULL_TREE;
}
else
/* We should have expanded this before. */
gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));
/* The TARGET_EXPR is replaced by its slot variable.  */
*expr_p = temp;
return GS_OK;
}
/* Gimplification of expression trees.  */
/* Gimplify an expression which appears at statement context.  The
   corresponding GIMPLE statements are added to *SEQ_P.  If *SEQ_P is
   NULL, a new sequence is allocated.
   Return true if we actually added a statement to the queue.  */
bool
gimplify_stmt (tree *stmt_p, gimple_seq *seq_p)
{
  gimple_seq_node tail_before;
  /* Lazily allocate the output sequence.  */
  if (*seq_p == NULL)
    *seq_p = gimple_seq_alloc ();
  /* Remember the end of the sequence so we can detect whether
     gimplification appended anything.  */
  tail_before = gimple_seq_last (*seq_p);
  gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none);
  return gimple_seq_last (*seq_p) != tail_before;
}
/* Add FIRSTPRIVATE entries for DECL in the OpenMP the surrounding parallels
   to CTX.  If entries already exist, force them to be some flavor of private.
   If there is no enclosing parallel, do nothing.  */
void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
  struct gimplify_omp_ctx *c;
  if (decl == NULL || !DECL_P (decl))
    return;
  /* Walk outward through the enclosing OpenMP contexts.  */
  for (c = ctx; c; c = c->outer_context)
    {
      splay_tree_node entry
	= splay_tree_lookup (c->variables, (splay_tree_key) decl);
      if (entry != NULL)
	{
	  /* Already some flavor of private here: nothing more to do
	     in this or any outer context.  */
	  if (!(entry->value & GOVD_SHARED))
	    return;
	  /* Demote SHARED to FIRSTPRIVATE, preserving the SEEN bit.  */
	  entry->value = GOVD_FIRSTPRIVATE | (entry->value & GOVD_SEEN);
	}
      else if (c->region_type != ORT_WORKSHARE)
	omp_add_variable (c, decl, GOVD_FIRSTPRIVATE);
    }
}
/* Similarly for each of the type sizes of TYPE.  */
static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
  enum tree_code code;
  if (type == NULL || type == error_mark_node)
    return;
  type = TYPE_MAIN_VARIANT (type);
  /* Process each type only once per context.  */
  if (pointer_set_insert (ctx->privatized_types, type))
    return;
  code = TREE_CODE (type);
  if (code == INTEGER_TYPE || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE
      || code == REAL_TYPE || code == FIXED_POINT_TYPE)
    {
      /* Scalar types: privatize their range bounds.  */
      omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
      omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
    }
  else if (code == ARRAY_TYPE)
    {
      /* Arrays: recurse into element type and index domain.  */
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
    }
  else if (code == RECORD_TYPE
	   || code == UNION_TYPE
	   || code == QUAL_UNION_TYPE)
    {
      /* Aggregates: privatize field offsets and recurse into each
	 field's type.  */
      tree field;
      for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL)
	  {
	    omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
	    omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
	  }
    }
  else if (code == POINTER_TYPE || code == REFERENCE_TYPE)
    omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
  /* In all cases, privatize the overall size expressions and let the
     frontend handle any language-specific pieces.  */
  omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
  omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));
  lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}
/* Add an entry for DECL in the OpenMP context CTX with FLAGS. */
static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
splay_tree_node n;
unsigned int nflags;
tree t;
if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
return;
/* Never elide decls whose type has TREE_ADDRESSABLE set. This means
there are constructors involved somewhere. */
if (TREE_ADDRESSABLE (TREE_TYPE (decl))
|| TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
flags |= GOVD_SEEN;
n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
if (n != NULL)
{
/* We shouldn't be re-adding the decl with the same data
sharing class. */
gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
/* The only combination of data sharing classes we should see is
FIRSTPRIVATE and LASTPRIVATE. */
nflags = n->value | flags;
gcc_assert ((nflags & GOVD_DATA_SHARE_CLASS)
== (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE));
/* Merge the flag sets and return; the entry already exists.  */
n->value = nflags;
return;
}
/* When adding a variable-sized variable, we have to handle all sorts
of additional bits of data: the pointer replacement variable, and
the parameters of the type. */
if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
/* Add the pointer replacement variable as PRIVATE if the variable
replacement is private, else FIRSTPRIVATE since we'll need the
address of the original variable either for SHARED, or for the
copy into or out of the context. */
if (!(flags & GOVD_LOCAL))
{
nflags = flags & GOVD_PRIVATE ? GOVD_PRIVATE : GOVD_FIRSTPRIVATE;
nflags |= flags & GOVD_SEEN;
/* The replacement pointer lives in DECL_VALUE_EXPR, which is
expected to be '*ptr'; extract ptr.  */
t = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (t) == INDIRECT_REF);
t = TREE_OPERAND (t, 0);
gcc_assert (DECL_P (t));
omp_add_variable (ctx, t, nflags);
}
/* Add all of the variable and type parameters (which should have
been gimplified to a formal temporary) as FIRSTPRIVATE. */
omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));
/* The variable-sized variable itself is never SHARED, only some form
of PRIVATE. The sharing would take place via the pointer variable
which we remapped above. */
if (flags & GOVD_SHARED)
flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
| (flags & (GOVD_SEEN | GOVD_EXPLICIT));
/* We're going to make use of the TYPE_SIZE_UNIT at least in the
alloca statement we generate for the variable, so make sure it
is available. This isn't automatically needed for the SHARED
case, since we won't be allocating local storage then.
For local variables TYPE_SIZE_UNIT might not be gimplified yet,
in this case omp_notice_variable will be called later
on when it is gimplified. */
else if (! (flags & GOVD_LOCAL))
omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
}
else if (lang_hooks.decls.omp_privatize_by_reference (decl))
{
gcc_assert ((flags & GOVD_LOCAL) == 0);
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));
/* Similar to the direct variable sized case above, we'll need the
size of references being privatized. */
if ((flags & GOVD_SHARED) == 0)
{
t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
if (TREE_CODE (t) != INTEGER_CST)
omp_notice_variable (ctx, t, true);
}
}
/* Finally record DECL with the (possibly adjusted) flags.  */
splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
}
/* Notice a threadprivate variable DECL used in OpenMP context CTX.
   This just prints out diagnostics about threadprivate variable uses
   in untied tasks.  If DECL2 is non-NULL, prevent this warning
   on that variable.  */
static bool
omp_notice_threadprivate_variable (struct gimplify_omp_ctx *ctx, tree decl,
				   tree decl2)
{
  /* Only untied task regions are diagnosed.  */
  if (ctx->region_type != ORT_UNTIED_TASK)
    return false;
  if (splay_tree_lookup (ctx->variables, (splay_tree_key) decl) == NULL)
    {
      error ("threadprivate variable %qs used in untied task",
	     IDENTIFIER_POINTER (DECL_NAME (decl)));
      error ("%Henclosing task", &ctx->location);
      /* Record DECL so the diagnostic fires at most once per context.  */
      splay_tree_insert (ctx->variables, (splay_tree_key) decl, 0);
    }
  /* Suppress any future diagnostic on DECL2 as well.  */
  if (decl2)
    splay_tree_insert (ctx->variables, (splay_tree_key) decl2, 0);
  return false;
}
/* Record the fact that DECL was used within the OpenMP context CTX.
IN_CODE is true when real code uses DECL, and false when we should
merely emit default(none) errors. Return true if DECL is going to
be remapped and thus DECL shouldn't be gimplified into its
DECL_VALUE_EXPR (if any). */
static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
splay_tree_node n;
unsigned flags = in_code ? GOVD_SEEN : 0;
bool ret = false, shared;
if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
return false;
/* Threadprivate variables are predetermined. */
if (is_global_var (decl))
{
if (DECL_THREAD_LOCAL_P (decl))
return omp_notice_threadprivate_variable (ctx, decl, NULL_TREE);
if (DECL_HAS_VALUE_EXPR_P (decl))
{
tree value = get_base_address (DECL_VALUE_EXPR (decl));
if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
return omp_notice_threadprivate_variable (ctx, decl, value);
}
}
n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
if (n == NULL)
{
/* DECL has no entry yet in this context: determine its data-sharing
class from the region's default clause (or a predetermined kind)
and add it.  */
enum omp_clause_default_kind default_kind, kind;
struct gimplify_omp_ctx *octx;
if (ctx->region_type == ORT_WORKSHARE)
goto do_outer;
/* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
remapped firstprivate instead of shared. To some extent this is
addressed in omp_firstprivatize_type_sizes, but not effectively. */
default_kind = ctx->default_kind;
kind = lang_hooks.decls.omp_predetermined_sharing (decl);
if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
default_kind = kind;
switch (default_kind)
{
case OMP_CLAUSE_DEFAULT_NONE:
/* default(none) but DECL appears without an explicit clause:
diagnose, then fall through and treat it as shared so a
single use produces a single error.  */
error ("%qs not specified in enclosing parallel",
IDENTIFIER_POINTER (DECL_NAME
(lang_hooks.decls.omp_report_decl (decl))));
if ((ctx->region_type & ORT_TASK) != 0)
error ("%Henclosing task", &ctx->location);
else
error ("%Henclosing parallel", &ctx->location);
/* FALLTHRU */
case OMP_CLAUSE_DEFAULT_SHARED:
flags |= GOVD_SHARED;
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
flags |= GOVD_PRIVATE;
break;
case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
flags |= GOVD_FIRSTPRIVATE;
break;
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
/* decl will be either GOVD_FIRSTPRIVATE or GOVD_SHARED. */
gcc_assert ((ctx->region_type & ORT_TASK) != 0);
if (ctx->outer_context)
omp_notice_variable (ctx->outer_context, decl, in_code);
/* Scan outward (up to the innermost parallel) looking for a
non-shared mapping; finding one makes DECL firstprivate
here.  */
for (octx = ctx->outer_context; octx; octx = octx->outer_context)
{
splay_tree_node n2;
n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
{
flags |= GOVD_FIRSTPRIVATE;
break;
}
if ((octx->region_type & ORT_PARALLEL) != 0)
break;
}
if (flags & GOVD_FIRSTPRIVATE)
break;
/* Locals and parameters of the current function with no outer
mapping also default to firstprivate.  */
if (octx == NULL
&& (TREE_CODE (decl) == PARM_DECL
|| (!is_global_var (decl)
&& DECL_CONTEXT (decl) == current_function_decl)))
{
flags |= GOVD_FIRSTPRIVATE;
break;
}
flags |= GOVD_SHARED;
break;
default:
gcc_unreachable ();
}
if ((flags & GOVD_PRIVATE)
&& lang_hooks.decls.omp_private_outer_ref (decl))
flags |= GOVD_PRIVATE_OUTER_REF;
omp_add_variable (ctx, decl, flags);
shared = (flags & GOVD_SHARED) != 0;
ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
goto do_outer;
}
/* Existing entry: a first real use of a variable-sized DECL must also
mark its replacement pointer (stored in DECL_VALUE_EXPR) as seen.  */
if ((n->value & (GOVD_SEEN | GOVD_LOCAL)) == 0
&& (flags & (GOVD_SEEN | GOVD_LOCAL)) == GOVD_SEEN
&& DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
splay_tree_node n2;
tree t = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (t) == INDIRECT_REF);
t = TREE_OPERAND (t, 0);
gcc_assert (DECL_P (t));
n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
n2->value |= GOVD_SEEN;
}
shared = ((flags | n->value) & GOVD_SHARED) != 0;
ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
/* If nothing changed, there's nothing left to do. */
if ((n->value & flags) == flags)
return ret;
flags |= n->value;
n->value = flags;
do_outer:
/* If the variable is private in the current context, then we don't
need to propagate anything to an outer context. */
if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
return ret;
if (ctx->outer_context
&& omp_notice_variable (ctx->outer_context, decl, in_code))
return true;
return ret;
}
/* Verify that DECL is private within CTX. If there's specific information
to the contrary in the innermost scope, generate an error. */
static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
if (n != NULL)
{
if (n->value & GOVD_SHARED)
{
if (ctx == gimplify_omp_ctxp)
{
/* Shared in the innermost context: diagnose and force the
entry to private so the error isn't repeated.  */
error ("iteration variable %qs should be private",
IDENTIFIER_POINTER (DECL_NAME (decl)));
n->value = GOVD_PRIVATE;
return true;
}
else
return false;
}
else if ((n->value & GOVD_EXPLICIT) != 0
&& (ctx == gimplify_omp_ctxp
|| (ctx->region_type == ORT_COMBINED_PARALLEL
&& gimplify_omp_ctxp->outer_context == ctx)))
{
/* Explicit firstprivate/reduction clauses conflict with the
iteration-variable role; diagnose but keep the entry.  */
if ((n->value & GOVD_FIRSTPRIVATE) != 0)
error ("iteration variable %qs should not be firstprivate",
IDENTIFIER_POINTER (DECL_NAME (decl)));
else if ((n->value & GOVD_REDUCTION) != 0)
error ("iteration variable %qs should not be reduction",
IDENTIFIER_POINTER (DECL_NAME (decl)));
}
/* Private only if CTX is the innermost context (or its combined
parallel parent).  */
return (ctx == gimplify_omp_ctxp
|| (ctx->region_type == ORT_COMBINED_PARALLEL
&& gimplify_omp_ctxp->outer_context == ctx));
}
/* No entry here: worksharing regions defer to their enclosing
context; other region types decide "not private".  */
if (ctx->region_type != ORT_WORKSHARE)
return false;
else if (ctx->outer_context)
return omp_is_private (ctx->outer_context, decl);
return false;
}
/* Return true if DECL is private within a parallel region
   that binds to the current construct's context or in parallel
   region's REDUCTION clause.  */
static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  /* Walk outward through worksharing contexts until we either find a
     recorded mapping for DECL or run past the binding region.  */
  for (;;)
    {
      splay_tree_node n;
      ctx = ctx->outer_context;
      if (ctx == NULL)
	return !(is_global_var (decl)
		 /* References might be private, but might be shared too.  */
		 || lang_hooks.decls.omp_privatize_by_reference (decl));
      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      if (n != NULL)
	return (n->value & GOVD_SHARED) == 0;
      /* Stop at the first non-worksharing enclosing region.  */
      if (ctx->region_type != ORT_WORKSHARE)
	return false;
    }
}
/* Scan the OpenMP clauses in *LIST_P, installing mappings into a new
and previous omp contexts. */
static void
gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
enum omp_region_type region_type)
{
struct gimplify_omp_ctx *ctx, *outer_ctx;
struct gimplify_ctx gctx;
tree c;
/* Push a fresh OpenMP context for this region; it is installed as
gimplify_omp_ctxp at the bottom of the function.  */
ctx = new_omp_context (region_type);
outer_ctx = ctx->outer_context;
while ((c = *list_p) != NULL)
{
bool remove = false;
bool notice_outer = true;
const char *check_non_private = NULL;
unsigned int flags;
tree decl;
switch (OMP_CLAUSE_CODE (c))
{
/* The data-sharing clauses each choose their GOVD flags and then
funnel into the common do_add code below.  */
case OMP_CLAUSE_PRIVATE:
flags = GOVD_PRIVATE | GOVD_EXPLICIT;
if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c)))
{
flags |= GOVD_PRIVATE_OUTER_REF;
OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1;
}
else
notice_outer = false;
goto do_add;
case OMP_CLAUSE_SHARED:
flags = GOVD_SHARED | GOVD_EXPLICIT;
goto do_add;
case OMP_CLAUSE_FIRSTPRIVATE:
flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
check_non_private = "firstprivate";
goto do_add;
case OMP_CLAUSE_LASTPRIVATE:
flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
check_non_private = "lastprivate";
goto do_add;
case OMP_CLAUSE_REDUCTION:
flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
check_non_private = "reduction";
goto do_add;
do_add:
decl = OMP_CLAUSE_DECL (c);
if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
{
remove = true;
break;
}
omp_add_variable (ctx, decl, flags);
/* Reduction clauses with a placeholder carry INIT and MERGE
expressions that must be gimplified now, inside this OpenMP
context, into their GIMPLE_SEQ slots.  */
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
GOVD_LOCAL | GOVD_SEEN);
gimplify_omp_ctxp = ctx;
push_gimplify_context (&gctx);
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = gimple_seq_alloc ();
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = gimple_seq_alloc ();
gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c)));
push_gimplify_context (&gctx);
gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c)));
OMP_CLAUSE_REDUCTION_INIT (c) = NULL_TREE;
OMP_CLAUSE_REDUCTION_MERGE (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
/* Lastprivate clauses may carry an assignment statement to run at
region exit; gimplify it (wrapped in a BIND_EXPR if needed) into
its GIMPLE_SEQ slot.  */
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_STMT (c))
{
gimplify_omp_ctxp = ctx;
push_gimplify_context (&gctx);
if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR)
{
tree bind = build3 (BIND_EXPR, void_type_node, NULL,
NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c);
OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind;
}
gimplify_and_add (OMP_CLAUSE_LASTPRIVATE_STMT (c),
&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)));
OMP_CLAUSE_LASTPRIVATE_STMT (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
if (notice_outer)
goto do_notice;
break;
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
{
remove = true;
break;
}
do_notice:
/* Propagate the use of DECL to the enclosing context, and reject
clauses whose decl is private in the outer context.  */
if (outer_ctx)
omp_notice_variable (outer_ctx, decl, true);
if (check_non_private
&& region_type == ORT_WORKSHARE
&& omp_check_private (ctx, decl))
{
error ("%s variable %qs is private in outer context",
check_non_private, IDENTIFIER_POINTER (DECL_NAME (decl)));
remove = true;
}
break;
case OMP_CLAUSE_IF:
/* The IF condition must be boolean before gimplification.  */
OMP_CLAUSE_OPERAND (c, 0)
= gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
/* Fall through. */
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NUM_THREADS:
/* Clauses with an expression operand: reduce it to a gimple
value; drop the clause on failure.  */
if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
break;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
/* Flag-like clauses need no processing here.  */
break;
case OMP_CLAUSE_DEFAULT:
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
default:
gcc_unreachable ();
}
/* Unlink removed clauses; otherwise advance along the chain.  */
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
gimplify_omp_ctxp = ctx;
}
/* For all variables that were not actually used within the context,
remove PRIVATE, SHARED, and FIRSTPRIVATE clauses. */
static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
tree *list_p = (tree *) data;
tree decl = (tree) n->key;
unsigned flags = n->value;
enum omp_clause_code code;
tree clause;
bool private_debug;
/* Explicit clauses and context-local decls are handled elsewhere;
unseen decls need no clause at all.  */
if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
return 0;
if ((flags & GOVD_SEEN) == 0)
return 0;
if (flags & GOVD_DEBUG_PRIVATE)
{
gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_PRIVATE);
private_debug = true;
}
else
private_debug
= lang_hooks.decls.omp_private_debug_clause (decl,
!!(flags & GOVD_SHARED));
/* Map the GOVD flag bits to the clause code to synthesize.  */
if (private_debug)
code = OMP_CLAUSE_PRIVATE;
else if (flags & GOVD_SHARED)
{
if (is_global_var (decl))
{
/* A shared global only needs a clause if some enclosing
context gives it a non-shared mapping.  */
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
while (ctx != NULL)
{
splay_tree_node on
= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
| GOVD_PRIVATE | GOVD_REDUCTION)) != 0)
break;
ctx = ctx->outer_context;
}
if (ctx == NULL)
return 0;
}
code = OMP_CLAUSE_SHARED;
}
else if (flags & GOVD_PRIVATE)
code = OMP_CLAUSE_PRIVATE;
else if (flags & GOVD_FIRSTPRIVATE)
code = OMP_CLAUSE_FIRSTPRIVATE;
else
gcc_unreachable ();
/* Prepend the synthesized clause to the list and let the frontend
finish any language-specific bookkeeping.  */
clause = build_omp_clause (code);
OMP_CLAUSE_DECL (clause) = decl;
OMP_CLAUSE_CHAIN (clause) = *list_p;
if (private_debug)
OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
*list_p = clause;
lang_hooks.decls.omp_finish_clause (clause);
return 0;
}
/* Post-gimplification fixup of the clause chain at *LIST_P for the
   current gimplify_omp_ctxp context: drop explicit PRIVATE / SHARED /
   FIRSTPRIVATE clauses whose variable the body never referenced,
   rewrite clauses into debug-private form where requested, record
   whether each LASTPRIVATE variable is also FIRSTPRIVATE, then append
   the implicit data-sharing clauses and tear the context down.  */
static void
gimplify_adjust_omp_clauses (tree *list_p)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree c, decl;

  while ((c = *list_p) != NULL)
    {
      splay_tree_node n;
      bool remove = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  /* Drop the clause if the variable was never used in the body.  */
	  remove = !(n->value & GOVD_SEEN);
	  if (! remove)
	    {
	      bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
	      if ((n->value & GOVD_DEBUG_PRIVATE)
		  || lang_hooks.decls.omp_private_debug_clause (decl, shared))
		{
		  /* GOVD_DEBUG_PRIVATE implies the variable is
		     classified private.  */
		  gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
			      || ((n->value & GOVD_DATA_SHARE_CLASS)
				  == GOVD_PRIVATE));
		  /* Rewrite C as a PRIVATE clause with the debug bit.  */
		  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
		  OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
		}
	    }
	  break;
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
	     accurately reflect the presence of a FIRSTPRIVATE clause.  */
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	    = (n->value & GOVD_FIRSTPRIVATE) != 0;
	  break;
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_COLLAPSE:
	  /* These clauses need no adjustment here.  */
	  break;
	default:
	  gcc_unreachable ();
	}

      if (remove)
	/* Unlink C from the chain; LIST_P stays put so the successor
	   is examined next.  */
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  /* Add in any implicit data sharing.  */
  splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, list_p);

  /* This context is finished: restore the enclosing one.  */
  gimplify_omp_ctxp = ctx->outer_context;
  delete_omp_context (ctx);
}
/* Gimplify the contents of an OMP_PARALLEL statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */
static void
gimplify_omp_parallel (tree *expr_p, gimple_seq *pre_p)
{
  tree par_expr = *expr_p;
  gimple_seq body_seq = NULL;
  gimple first_stmt;
  gimple par_stmt;
  struct gimplify_ctx save_ctx;

  /* Record data-sharing information for the clauses and for every
     variable referenced by the body.  */
  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (par_expr), pre_p,
			     OMP_PARALLEL_COMBINED (par_expr)
			     ? ORT_COMBINED_PARALLEL : ORT_PARALLEL);

  /* Gimplify the body inside its own gimplification context.  */
  push_gimplify_context (&save_ctx);
  first_stmt = gimplify_and_return_first (OMP_PARALLEL_BODY (par_expr),
					  &body_seq);
  if (gimple_code (first_stmt) == GIMPLE_BIND)
    pop_gimplify_context (first_stmt);
  else
    pop_gimplify_context (NULL);

  /* Drop unused clauses and append the implicit ones found above.  */
  gimplify_adjust_omp_clauses (&OMP_PARALLEL_CLAUSES (par_expr));

  par_stmt = gimple_build_omp_parallel (body_seq,
					OMP_PARALLEL_CLAUSES (par_expr),
					NULL_TREE, NULL_TREE);
  if (OMP_PARALLEL_COMBINED (par_expr))
    gimple_omp_set_subcode (par_stmt, GF_OMP_PARALLEL_COMBINED);
  gimplify_seq_add_stmt (pre_p, par_stmt);
  *expr_p = NULL_TREE;
}
/* Gimplify the contents of an OMP_TASK statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */
static void
gimplify_omp_task (tree *expr_p, gimple_seq *pre_p)
{
  tree task_expr = *expr_p;
  gimple_seq body_seq = NULL;
  gimple first_stmt;
  gimple task_stmt;
  struct gimplify_ctx save_ctx;

  /* Scan the clauses; an UNTIED clause selects the untied-task region
     type.  */
  gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (task_expr), pre_p,
			     find_omp_clause (OMP_TASK_CLAUSES (task_expr),
					      OMP_CLAUSE_UNTIED)
			     ? ORT_UNTIED_TASK : ORT_TASK);

  /* Gimplify the body inside its own gimplification context.  */
  push_gimplify_context (&save_ctx);
  first_stmt = gimplify_and_return_first (OMP_TASK_BODY (task_expr),
					  &body_seq);
  pop_gimplify_context (gimple_code (first_stmt) == GIMPLE_BIND
			? first_stmt : NULL);

  /* Drop unused clauses and append the implicit ones found above.  */
  gimplify_adjust_omp_clauses (&OMP_TASK_CLAUSES (task_expr));

  task_stmt = gimple_build_omp_task (body_seq,
				     OMP_TASK_CLAUSES (task_expr),
				     NULL_TREE, NULL_TREE,
				     NULL_TREE, NULL_TREE, NULL_TREE);
  gimplify_seq_add_stmt (pre_p, task_stmt);
  *expr_p = NULL_TREE;
}
/* Gimplify the gross structure of an OMP_FOR statement.

   Scans the clauses, gimplifies the pre-body and the per-level
   INIT/COND/INCR expressions, privatizes every iteration variable,
   canonicalizes each increment into VAR = VAR +/- STEP form, and
   finally builds the GIMPLE_OMP_FOR statement appended to PRE_P.
   Returns GS_ALL_DONE on success, GS_ERROR otherwise.  */
static enum gimplify_status
gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
{
  tree for_stmt, decl, var, t;
  enum gimplify_status ret = GS_OK;
  gimple gfor;
  gimple_seq for_body, for_pre_body;
  int i;

  for_stmt = *expr_p;

  /* Record data-sharing information for clauses and body variables.  */
  gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
			     ORT_WORKSHARE);

  /* Handle OMP_FOR_INIT.  */
  for_pre_body = NULL;
  gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body);
  OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;
  for_body = gimple_seq_alloc ();

  /* INIT, COND and INCR are parallel vectors with one element per
     (possibly collapsed) loop level.  */
  gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
	      == TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
  gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
	      == TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt)));
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
    {
      t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
      gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
      decl = TREE_OPERAND (t, 0);
      gcc_assert (DECL_P (decl));
      gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl))
		  || POINTER_TYPE_P (TREE_TYPE (decl)));

      /* Make sure the iteration variable is private.  */
      if (omp_is_private (gimplify_omp_ctxp, decl))
	omp_notice_variable (gimplify_omp_ctxp, decl, true);
      else
	omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);

      /* If DECL is not a gimple register, create a temporary variable to act
	 as an iteration counter.  This is valid, since DECL cannot be
	 modified in the body of the loop.  */
      if (!is_gimple_reg (decl))
	{
	  var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
	  TREE_OPERAND (t, 0) = var;
	  /* Copy the counter into DECL at the top of the body so the
	     user's variable still holds the current value there.  */
	  gimplify_seq_add_stmt (&for_body, gimple_build_assign (decl, var));
	  omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
	}
      else
	var = decl;

      /* Gimplify the initial value into the pre-body.  */
      ret |= gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
			    is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;

      /* Handle OMP_FOR_COND.  */
      t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
      gcc_assert (COMPARISON_CLASS_P (t));
      gcc_assert (TREE_OPERAND (t, 0) == decl);
      ret |= gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
			    is_gimple_val, fb_rvalue);

      /* Handle OMP_FOR_INCR: canonicalize every increment form into
	 VAR = VAR +/- STEP.  */
      t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
      switch (TREE_CODE (t))
	{
	case PREINCREMENT_EXPR:
	case POSTINCREMENT_EXPR:
	  /* ++ becomes VAR = VAR + 1.  */
	  t = build_int_cst (TREE_TYPE (decl), 1);
	  t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
	  TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
	  break;
	case PREDECREMENT_EXPR:
	case POSTDECREMENT_EXPR:
	  /* -- becomes VAR = VAR + (-1).  */
	  t = build_int_cst (TREE_TYPE (decl), -1);
	  t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
	  TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
	  break;
	case MODIFY_EXPR:
	  gcc_assert (TREE_OPERAND (t, 0) == decl);
	  TREE_OPERAND (t, 0) = var;
	  t = TREE_OPERAND (t, 1);
	  switch (TREE_CODE (t))
	    {
	    case PLUS_EXPR:
	      if (TREE_OPERAND (t, 1) == decl)
		{
		  /* The form is STEP + DECL; swap the operands so VAR
		     comes first.  */
		  TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
		  TREE_OPERAND (t, 0) = var;
		  break;
		}
	      /* Fallthru.  */
	    case MINUS_EXPR:
	    case POINTER_PLUS_EXPR:
	      gcc_assert (TREE_OPERAND (t, 0) == decl);
	      TREE_OPERAND (t, 0) = var;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  /* Gimplify the step expression into the pre-body.  */
	  ret |= gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
				is_gimple_val, fb_rvalue);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (var != decl || TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1)
	{
	  /* When the counter was replaced by a temporary (or loops are
	     collapsed), a LASTPRIVATE clause on DECL must still expose
	     the final value: synthesize DECL = DECL +/- STEP into the
	     clause's GIMPLE sequence if it has none yet.  */
	  tree c;
	  for (c = OMP_FOR_CLAUSES (for_stmt); c ; c = OMP_CLAUSE_CHAIN (c))
	    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		&& OMP_CLAUSE_DECL (c) == decl
		&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) == NULL)
	      {
		t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
		gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
		gcc_assert (TREE_OPERAND (t, 0) == var);
		t = TREE_OPERAND (t, 1);
		gcc_assert (TREE_CODE (t) == PLUS_EXPR
			    || TREE_CODE (t) == MINUS_EXPR
			    || TREE_CODE (t) == POINTER_PLUS_EXPR);
		gcc_assert (TREE_OPERAND (t, 0) == var);
		t = build2 (TREE_CODE (t), TREE_TYPE (decl), decl,
			    TREE_OPERAND (t, 1));
		gimplify_assign (decl, t,
				 &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	      }
	}
    }

  gimplify_and_add (OMP_FOR_BODY (for_stmt), &for_body);
  gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));

  gfor = gimple_build_omp_for (for_body, OMP_FOR_CLAUSES (for_stmt),
			       TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)),
			       for_pre_body);

  /* Transcribe the canonicalized INIT/COND/INCR trees into the
     GIMPLE_OMP_FOR statement, one index per collapse level.  */
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
    {
      t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
      gimple_omp_for_set_index (gfor, i, TREE_OPERAND (t, 0));
      gimple_omp_for_set_initial (gfor, i, TREE_OPERAND (t, 1));
      t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
      gimple_omp_for_set_cond (gfor, i, TREE_CODE (t));
      gimple_omp_for_set_final (gfor, i, TREE_OPERAND (t, 1));
      t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
      gimple_omp_for_set_incr (gfor, i, TREE_OPERAND (t, 1));
    }

  gimplify_seq_add_stmt (pre_p, gfor);
  return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
}
/* Gimplify the gross structure of other OpenMP worksharing constructs.
   In particular, OMP_SECTIONS and OMP_SINGLE.  */
static void
gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
{
  tree ws_expr = *expr_p;
  gimple_seq body_seq = NULL;
  gimple ws_stmt = NULL;

  /* Scan clauses, gimplify the body, then finalize the clause list.  */
  gimplify_scan_omp_clauses (&OMP_CLAUSES (ws_expr), pre_p, ORT_WORKSHARE);
  gimplify_and_add (OMP_BODY (ws_expr), &body_seq);
  gimplify_adjust_omp_clauses (&OMP_CLAUSES (ws_expr));

  /* Build the matching GIMPLE worksharing statement.  */
  switch (TREE_CODE (ws_expr))
    {
    case OMP_SECTIONS:
      ws_stmt = gimple_build_omp_sections (body_seq, OMP_CLAUSES (ws_expr));
      break;
    case OMP_SINGLE:
      ws_stmt = gimple_build_omp_single (body_seq, OMP_CLAUSES (ws_expr));
      break;
    default:
      gcc_unreachable ();
    }
  gimplify_seq_add_stmt (pre_p, ws_stmt);
}
/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */
static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  while ((CONVERT_EXPR_P (expr)
	  || TREE_CODE (expr) == NON_LVALUE_EXPR)
	 && TREE_OPERAND (expr, 0) != error_mark_node
	 && (TYPE_MAIN_VARIANT (TREE_TYPE (expr))
	     == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (expr, 0)))))
    expr = TREE_OPERAND (expr, 0);

  if (TREE_CODE (expr) == INDIRECT_REF)
    {
      /* EXPR is *P: it matches if P reduces to ADDR.  Strip
	 variant-preserving conversions from both trees in lockstep.  */
      expr = TREE_OPERAND (expr, 0);
      while (expr != addr
	     && (CONVERT_EXPR_P (expr)
		 || TREE_CODE (expr) == NON_LVALUE_EXPR)
	     && TREE_CODE (expr) == TREE_CODE (addr)
	     && TYPE_MAIN_VARIANT (TREE_TYPE (expr))
		== TYPE_MAIN_VARIANT (TREE_TYPE (addr)))
	{
	  expr = TREE_OPERAND (expr, 0);
	  addr = TREE_OPERAND (addr, 0);
	}
      if (expr == addr)
	return true;
      /* Also accept &X on both sides naming the same object.  */
      return (TREE_CODE (addr) == ADDR_EXPR
	      && TREE_CODE (expr) == ADDR_EXPR
	      && TREE_OPERAND (addr, 0) == TREE_OPERAND (expr, 0));
    }
  /* A direct use of the object itself when ADDR is &OBJECT.  */
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}
/* Walk *EXPR_P and replace
   appearances of *LHS_ADDR with LHS_VAR.  If an expression does not involve
   the lhs, evaluate it into a temporary.  Return 1 if the lhs appeared as
   a subexpression, 0 if it did not, or -1 if an error was encountered.  */
static int
goa_stabilize_expr (tree *expr_p, gimple_seq *pre_p, tree lhs_addr,
		    tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;

  /* The whole expression is the stabilized lhs: substitute the
     temporary holding the atomically loaded value.  */
  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  /* Gimple values are already stable; leave them alone.  */
  if (is_gimple_val (expr))
    return 0;

  saw_lhs = 0;
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
    case tcc_comparison:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p, lhs_addr,
				     lhs_var);
      /* FALLTHRU: binary nodes then recurse on operand 0 below.  */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p, lhs_addr,
				     lhs_var);
      break;
    case tcc_expression:
      switch (TREE_CODE (expr))
	{
	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	case TRUTH_AND_EXPR:
	case TRUTH_OR_EXPR:
	case TRUTH_XOR_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
					 lhs_addr, lhs_var);
	  /* FALLTHRU: binary truth nodes then recurse on operand 0.  */
	case TRUTH_NOT_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
					 lhs_addr, lhs_var);
	  break;
	default:
	  break;
	}
      break;
    default:
      break;
    }

  /* The lhs did not occur anywhere in EXPR: evaluate EXPR into a
     temporary, emitting the computation into PRE_P.  */
  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }
  return saw_lhs;
}
/* Gimplify an OMP_ATOMIC statement.

   Lowers the tree form (operand 0 = address of the lhs, operand 1 =
   the new value, which may reference the lhs) into a
   GIMPLE_OMP_ATOMIC_LOAD of *ADDR into a temporary followed by a
   GIMPLE_OMP_ATOMIC_STORE of the gimplified RHS, both appended to
   PRE_P.  Returns GS_ALL_DONE on success, GS_ERROR otherwise.  */
static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, gimple_seq *pre_p)
{
  tree addr = TREE_OPERAND (*expr_p, 0);   /* Address of the lhs object.  */
  tree rhs = TREE_OPERAND (*expr_p, 1);    /* Value to store.  */
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  tree tmp_load;

  /* Temporary that receives the atomically loaded old value.  */
  tmp_load = create_tmp_var (type, NULL);
  if (TREE_CODE (type) == COMPLEX_TYPE || TREE_CODE (type) == VECTOR_TYPE)
    DECL_GIMPLE_REG_P (tmp_load) = 1;

  /* Rewrite uses of the lhs inside RHS to TMP_LOAD, forcing any other
     subexpressions into temporaries evaluated before the atomic.  */
  if (goa_stabilize_expr (&rhs, pre_p, addr, tmp_load) < 0)
    return GS_ERROR;

  if (gimplify_expr (&addr, pre_p, NULL, is_gimple_val, fb_rvalue)
      != GS_ALL_DONE)
    return GS_ERROR;
  gimplify_seq_add_stmt (pre_p, gimple_build_omp_atomic_load (tmp_load, addr));

  if (gimplify_expr (&rhs, pre_p, NULL, is_gimple_val, fb_rvalue)
      != GS_ALL_DONE)
    return GS_ERROR;
  gimplify_seq_add_stmt (pre_p, gimple_build_omp_atomic_store (rhs));

  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* Converts the GENERIC expression tree *EXPR_P to GIMPLE. If the
expression produces a value to be used as an operand inside a GIMPLE
statement, the value will be stored back in *EXPR_P. This value will
be a tree of class tcc_declaration, tcc_constant, tcc_reference or
an SSA_NAME. The corresponding sequence of GIMPLE statements is
emitted in PRE_P and POST_P.
Additionally, this process may overwrite parts of the input
expression during gimplification. Ideally, it should be
possible to do non-destructive gimplification.
EXPR_P points to the GENERIC expression to convert to GIMPLE. If
the expression needs to evaluate to a value to be used as
an operand in a GIMPLE statement, this value will be stored in
*EXPR_P on exit. This happens when the caller specifies one
of fb_lvalue or fb_rvalue fallback flags.
PRE_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of EXPR and all the side-effects that must
be executed before the main expression. On exit, the last
statement of PRE_P is the core statement being gimplified. For
instance, when gimplifying 'if (++a)' the last statement in
PRE_P will be 'if (t.1)' where t.1 is the result of
pre-incrementing 'a'.
POST_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of all the side-effects that must be executed
after the main expression. If this is NULL, the post
side-effects are stored at the end of PRE_P.
The reason why the output is split in two is to handle post
side-effects explicitly. In some cases, an expression may have
inner and outer post side-effects which need to be emitted in
an order different from the one given by the recursive
traversal. For instance, for the expression (*p--)++ the post
side-effects of '--' must actually occur *after* the post
side-effects of '++'. However, gimplification will first visit
the inner expression, so if a separate POST sequence was not
used, the resulting sequence would be:
1 t.1 = *p
2 p = p - 1
3 t.2 = t.1 + 1
4 *p = t.2
However, the post-decrement operation in line #2 must not be
evaluated until after the store to *p at line #4, so the
correct sequence should be:
1 t.1 = *p
2 t.2 = t.1 + 1
3 *p = t.2
4 p = p - 1
So, by specifying a separate post queue, it is possible
to emit the post side-effects in the correct order.
If POST_P is NULL, an internal queue will be used. Before
returning to the caller, the sequence POST_P is appended to
the main output sequence PRE_P.
GIMPLE_TEST_F points to a function that takes a tree T and
returns nonzero if T is in the GIMPLE form requested by the
caller. The GIMPLE predicates are in tree-gimple.c.
FALLBACK tells the function what sort of a temporary we want if
gimplification cannot produce an expression that complies with
GIMPLE_TEST_F.
fb_none means that no temporary should be generated
fb_rvalue means that an rvalue is OK to generate
fb_lvalue means that an lvalue is OK to generate
fb_either means that either is OK, but an lvalue is preferable.
fb_mayfail means that gimplification may fail (in which case
GS_ERROR will be returned)
The return value is either GS_ERROR or GS_ALL_DONE, since this
function iterates until EXPR is completely gimplified or an error
occurs. */
enum gimplify_status
gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool (*gimple_test_f) (tree), fallback_t fallback)
{
tree tmp;
gimple_seq internal_pre = NULL;
gimple_seq internal_post = NULL;
tree save_expr;
bool is_statement;
location_t saved_location;
enum gimplify_status ret;
gimple_stmt_iterator pre_last_gsi, post_last_gsi;
save_expr = *expr_p;
if (save_expr == NULL_TREE)
return GS_ALL_DONE;
/* If we are gimplifying a top-level statement, PRE_P must be valid. */
is_statement = gimple_test_f == is_gimple_stmt;
if (is_statement)
gcc_assert (pre_p);
/* Consistency checks. */
if (gimple_test_f == is_gimple_reg)
gcc_assert (fallback & (fb_rvalue | fb_lvalue));
else if (gimple_test_f == is_gimple_val
|| gimple_test_f == is_gimple_formal_tmp_rhs
|| gimple_test_f == is_gimple_formal_tmp_or_call_rhs
|| gimple_test_f == is_gimple_formal_tmp_reg
|| gimple_test_f == is_gimple_formal_tmp_var
|| gimple_test_f == is_gimple_call_addr
|| gimple_test_f == is_gimple_condexpr
|| gimple_test_f == is_gimple_mem_rhs
|| gimple_test_f == is_gimple_mem_or_call_rhs
|| gimple_test_f == is_gimple_reg_rhs
|| gimple_test_f == is_gimple_reg_or_call_rhs
|| gimple_test_f == is_gimple_asm_val)
gcc_assert (fallback & fb_rvalue);
else if (gimple_test_f == is_gimple_min_lval
|| gimple_test_f == is_gimple_lvalue)
gcc_assert (fallback & fb_lvalue);
else if (gimple_test_f == is_gimple_addressable)
gcc_assert (fallback & fb_either);
else if (gimple_test_f == is_gimple_stmt)
gcc_assert (fallback == fb_none);
else
{
/* We should have recognized the GIMPLE_TEST_F predicate to
know what kind of fallback to use in case a temporary is
needed to hold the value or address of *EXPR_P. */
gcc_unreachable ();
}
/* We used to check the predicate here and return immediately if it
succeeds. This is wrong; the design is for gimplification to be
idempotent, and for the predicates to only test for valid forms, not
whether they are fully simplified. */
if (pre_p == NULL)
pre_p = &internal_pre;
if (post_p == NULL)
post_p = &internal_post;
/* Remember the last statements added to PRE_P and POST_P. Every
new statement added by the gimplification helpers needs to be
annotated with location information. To centralize the
responsibility, we remember the last statement that had been
added to both queues before gimplifying *EXPR_P. If
gimplification produces new statements in PRE_P and POST_P, those
statements will be annotated with the same location information
as *EXPR_P. */
pre_last_gsi = gsi_last (*pre_p);
post_last_gsi = gsi_last (*post_p);
saved_location = input_location;
if (save_expr != error_mark_node
&& EXPR_HAS_LOCATION (*expr_p))
input_location = EXPR_LOCATION (*expr_p);
/* Loop over the specific gimplifiers until the toplevel node
remains the same. */
do
{
/* Strip away as many useless type conversions as possible
at the toplevel. */
STRIP_USELESS_TYPE_CONVERSION (*expr_p);
/* Remember the expr. */
save_expr = *expr_p;
/* Die, die, die, my darling. */
if (save_expr == error_mark_node
|| (TREE_TYPE (save_expr)
&& TREE_TYPE (save_expr) == error_mark_node))
{
ret = GS_ERROR;
break;
}
/* Do any language-specific gimplification. */
ret = lang_hooks.gimplify_expr (expr_p, pre_p, post_p);
if (ret == GS_OK)
{
if (*expr_p == NULL_TREE)
break;
if (*expr_p != save_expr)
continue;
}
else if (ret != GS_UNHANDLED)
break;
ret = GS_OK;
switch (TREE_CODE (*expr_p))
{
/* First deal with the special cases. */
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
ret = gimplify_self_mod_expr (expr_p, pre_p, post_p,
fallback != fb_none);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
case VIEW_CONVERT_EXPR:
ret = gimplify_compound_lval (expr_p, pre_p, post_p,
fallback ? fallback : fb_rvalue);
break;
case COND_EXPR:
ret = gimplify_cond_expr (expr_p, pre_p, fallback);
/* C99 code may assign to an array in a structure value of a
conditional expression, and this has undefined behavior
only on execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
}
break;
case CALL_EXPR:
ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none);
/* C99 code may assign to an array in a structure returned
from a function, and this has undefined behavior only on
execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
}
break;
case TREE_LIST:
gcc_unreachable ();
case COMPOUND_EXPR:
ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
break;
case MODIFY_EXPR:
case INIT_EXPR:
ret = gimplify_modify_expr (expr_p, pre_p, post_p,
fallback != fb_none);
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
ret = gimplify_boolean_expr (expr_p);
break;
case TRUTH_NOT_EXPR:
if (TREE_CODE (TREE_TYPE (*expr_p)) != BOOLEAN_TYPE)
{
tree type = TREE_TYPE (*expr_p);
*expr_p = fold_convert (type, gimple_boolify (*expr_p));
ret = GS_OK;
break;
}
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
case ADDR_EXPR:
ret = gimplify_addr_expr (expr_p, pre_p, post_p);
break;
case VA_ARG_EXPR:
ret = gimplify_va_arg_expr (expr_p, pre_p, post_p);
break;
CASE_CONVERT:
if (IS_EMPTY_STMT (*expr_p))
{
ret = GS_ALL_DONE;
break;
}
if (VOID_TYPE_P (TREE_TYPE (*expr_p))
|| fallback == fb_none)
{
/* Just strip a conversion to void (or in void context) and
try again. */
*expr_p = TREE_OPERAND (*expr_p, 0);
break;
}
ret = gimplify_conversion (expr_p);
if (ret == GS_ERROR)
break;
if (*expr_p != save_expr)
break;
/* FALLTHRU */
case FIX_TRUNC_EXPR:
/* unary_expr: ... | '(' cast ')' val | ... */
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
case INDIRECT_REF:
*expr_p = fold_indirect_ref (*expr_p);
if (*expr_p != save_expr)
break;
/* else fall through. */
case ALIGN_INDIRECT_REF:
case MISALIGNED_INDIRECT_REF:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_reg, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
/* Constants need not be gimplified. */
case INTEGER_CST:
case REAL_CST:
case FIXED_CST:
case STRING_CST:
case COMPLEX_CST:
case VECTOR_CST:
ret = GS_ALL_DONE;
break;
case CONST_DECL:
/* If we require an lvalue, such as for ADDR_EXPR, retain the
CONST_DECL node. Otherwise the decl is replaceable by its
value. */
/* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */
if (fallback & fb_lvalue)
ret = GS_ALL_DONE;
else
*expr_p = DECL_INITIAL (*expr_p);
break;
case DECL_EXPR:
ret = gimplify_decl_expr (expr_p, pre_p);
break;
case EXC_PTR_EXPR:
/* FIXME make this a decl. */
ret = GS_ALL_DONE;
break;
case BIND_EXPR:
ret = gimplify_bind_expr (expr_p, pre_p);
break;
case LOOP_EXPR:
ret = gimplify_loop_expr (expr_p, pre_p);
break;
case SWITCH_EXPR:
ret = gimplify_switch_expr (expr_p, pre_p);
break;
case EXIT_EXPR:
ret = gimplify_exit_expr (expr_p);
break;
case GOTO_EXPR:
/* If the target is not LABEL, then it is a computed jump
and the target needs to be gimplified. */
if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL)
{
ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p,
NULL, is_gimple_val, fb_rvalue);
if (ret == GS_ERROR)
break;
}
gimplify_seq_add_stmt (pre_p,
gimple_build_goto (GOTO_DESTINATION (*expr_p)));
break;
case PREDICT_EXPR:
gimplify_seq_add_stmt (pre_p,
gimple_build_predict (PREDICT_EXPR_PREDICTOR (*expr_p),
PREDICT_EXPR_OUTCOME (*expr_p)));
ret = GS_ALL_DONE;
break;
case LABEL_EXPR:
ret = GS_ALL_DONE;
gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p))
== current_function_decl);
gimplify_seq_add_stmt (pre_p,
gimple_build_label (LABEL_EXPR_LABEL (*expr_p)));
break;
case CASE_LABEL_EXPR:
ret = gimplify_case_label_expr (expr_p, pre_p);
break;
case RETURN_EXPR:
ret = gimplify_return_expr (*expr_p, pre_p);
break;
case CONSTRUCTOR:
/* Don't reduce this in place; let gimplify_init_constructor work its
magic. Buf if we're just elaborating this for side effects, just
gimplify any element that has side-effects. */
if (fallback == fb_none)
{
unsigned HOST_WIDE_INT ix;
constructor_elt *ce;
tree temp = NULL_TREE;
for (ix = 0;
VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (*expr_p),
ix, ce);
ix++)
if (TREE_SIDE_EFFECTS (ce->value))
append_to_statement_list (ce->value, &temp);
*expr_p = temp;
ret = GS_OK;
}
/* C99 code may assign to an array in a constructed
structure or union, and this has undefined behavior only
on execution, so create a temporary if an lvalue is
required. */
else if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
}
else
ret = GS_ALL_DONE;
break;
/* The following are special cases that are not handled by the
original GIMPLE grammar. */
/* SAVE_EXPR nodes are converted into a GIMPLE identifier and
eliminated. */
case SAVE_EXPR:
ret = gimplify_save_expr (expr_p, pre_p, post_p);
break;
case BIT_FIELD_REF:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_lvalue, fb_either);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
ret = MIN (r0, MIN (r1, r2));
}
break;
case NON_LVALUE_EXPR:
/* This should have been stripped above. */
gcc_unreachable ();
case ASM_EXPR:
ret = gimplify_asm_expr (expr_p, pre_p, post_p);
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
{
gimple_seq eval, cleanup;
gimple try_;
eval = cleanup = NULL;
gimplify_and_add (TREE_OPERAND (*expr_p, 0), &eval);
gimplify_and_add (TREE_OPERAND (*expr_p, 1), &cleanup);
/* Don't create bogus GIMPLE_TRY with empty cleanup. */
if (gimple_seq_empty_p (cleanup))
{
gimple_seq_add_seq (pre_p, eval);
ret = GS_ALL_DONE;
break;
}
try_ = gimple_build_try (eval, cleanup,
TREE_CODE (*expr_p) == TRY_FINALLY_EXPR
? GIMPLE_TRY_FINALLY
: GIMPLE_TRY_CATCH);
if (TREE_CODE (*expr_p) == TRY_CATCH_EXPR)
gimple_try_set_catch_is_cleanup (try_,
TRY_CATCH_IS_CLEANUP (*expr_p));
gimplify_seq_add_stmt (pre_p, try_);
ret = GS_ALL_DONE;
break;
}
case CLEANUP_POINT_EXPR:
ret = gimplify_cleanup_point_expr (expr_p, pre_p);
break;
case TARGET_EXPR:
ret = gimplify_target_expr (expr_p, pre_p, post_p);
break;
case CATCH_EXPR:
{
gimple c;
gimple_seq handler = NULL;
gimplify_and_add (CATCH_BODY (*expr_p), &handler);
c = gimple_build_catch (CATCH_TYPES (*expr_p), handler);
gimplify_seq_add_stmt (pre_p, c);
ret = GS_ALL_DONE;
break;
}
case EH_FILTER_EXPR:
{
gimple ehf;
gimple_seq failure = NULL;
gimplify_and_add (EH_FILTER_FAILURE (*expr_p), &failure);
ehf = gimple_build_eh_filter (EH_FILTER_TYPES (*expr_p), failure);
gimple_eh_filter_set_must_not_throw
(ehf, EH_FILTER_MUST_NOT_THROW (*expr_p));
gimplify_seq_add_stmt (pre_p, ehf);
ret = GS_ALL_DONE;
break;
}
case CHANGE_DYNAMIC_TYPE_EXPR:
{
gimple cdt;
ret = gimplify_expr (&CHANGE_DYNAMIC_TYPE_LOCATION (*expr_p),
pre_p, post_p, is_gimple_reg, fb_lvalue);
cdt = gimple_build_cdt (CHANGE_DYNAMIC_TYPE_NEW_TYPE (*expr_p),
CHANGE_DYNAMIC_TYPE_LOCATION (*expr_p));
gimplify_seq_add_stmt (pre_p, cdt);
ret = GS_ALL_DONE;
}
break;
case OBJ_TYPE_REF:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
TREE_SIDE_EFFECTS (*expr_p) = 0;
ret = MIN (r0, r1);
}
break;
case LABEL_DECL:
/* We get here when taking the address of a label. We mark
the label as "forced"; meaning it can never be removed and
it is a potential target for any computed goto. */
FORCED_LABEL (*expr_p) = 1;
ret = GS_ALL_DONE;
break;
case STATEMENT_LIST:
ret = gimplify_statement_list (expr_p, pre_p);
break;
case WITH_SIZE_EXPR:
{
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p == &internal_post ? NULL : post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
is_gimple_val, fb_rvalue);
}
break;
case VAR_DECL:
case PARM_DECL:
ret = gimplify_var_or_parm_decl (expr_p);
break;
case RESULT_DECL:
/* When within an OpenMP context, notice uses of variables. */
if (gimplify_omp_ctxp)
omp_notice_variable (gimplify_omp_ctxp, *expr_p, true);
ret = GS_ALL_DONE;
break;
case SSA_NAME:
/* Allow callbacks into the gimplifier during optimization. */
ret = GS_ALL_DONE;
break;
case OMP_PARALLEL:
gimplify_omp_parallel (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_TASK:
gimplify_omp_task (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_FOR:
ret = gimplify_omp_for (expr_p, pre_p);
break;
case OMP_SECTIONS:
case OMP_SINGLE:
gimplify_omp_workshare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_SECTION:
case OMP_MASTER:
case OMP_ORDERED:
case OMP_CRITICAL:
{
gimple_seq body = NULL;
gimple g;
gimplify_and_add (OMP_BODY (*expr_p), &body);
switch (TREE_CODE (*expr_p))
{
case OMP_SECTION:
g = gimple_build_omp_section (body);
break;
case OMP_MASTER:
g = gimple_build_omp_master (body);
break;
case OMP_ORDERED:
g = gimple_build_omp_ordered (body);
break;
case OMP_CRITICAL:
g = gimple_build_omp_critical (body,
OMP_CRITICAL_NAME (*expr_p));
break;
default:
gcc_unreachable ();
}
gimplify_seq_add_stmt (pre_p, g);
ret = GS_ALL_DONE;
break;
}
case OMP_ATOMIC:
ret = gimplify_omp_atomic (expr_p, pre_p);
break;
case POINTER_PLUS_EXPR:
/* Convert ((type *)A)+offset into &A->field_of_type_and_offset.
The second is gimple immediate saving a need for extra statement.
*/
if (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST
&& (tmp = maybe_fold_offset_to_address
(TREE_OPERAND (*expr_p, 0), TREE_OPERAND (*expr_p, 1),
TREE_TYPE (*expr_p))))
{
*expr_p = tmp;
break;
}
/* Convert (void *)&a + 4 into (void *)&a[1]. */
if (TREE_CODE (TREE_OPERAND (*expr_p, 0)) == NOP_EXPR
&& TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST
&& POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (*expr_p,
0),0)))
&& (tmp = maybe_fold_offset_to_address
(TREE_OPERAND (TREE_OPERAND (*expr_p, 0), 0),
TREE_OPERAND (*expr_p, 1),
TREE_TYPE (TREE_OPERAND (TREE_OPERAND (*expr_p, 0),
0)))))
{
*expr_p = fold_convert (TREE_TYPE (*expr_p), tmp);
break;
}
/* FALLTHRU */
default:
switch (TREE_CODE_CLASS (TREE_CODE (*expr_p)))
{
case tcc_comparison:
/* Handle comparison of objects of non scalar mode aggregates
with a call to memcmp. It would be nice to only have to do
this for variable-sized objects, but then we'd have to allow
the same nest of reference nodes we allow for MODIFY_EXPR and
that's too complex.
Compare scalar mode aggregates as scalar mode values. Using
memcmp for them would be very inefficient at best, and is
plain wrong if bitfields are involved. */
{
tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1));
if (!AGGREGATE_TYPE_P (type))
goto expr_2;
else if (TYPE_MODE (type) != BLKmode)
ret = gimplify_scalar_mode_aggregate_compare (expr_p);
else
ret = gimplify_variable_sized_compare (expr_p);
break;
}
/* If *EXPR_P does not need to be special-cased, handle it
according to its class. */
case tcc_unary:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
break;
case tcc_binary:
expr_2:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (r0, r1);
break;
}
case tcc_declaration:
case tcc_constant:
ret = GS_ALL_DONE;
goto dont_recalculate;
default:
gcc_assert (TREE_CODE (*expr_p) == TRUTH_AND_EXPR
|| TREE_CODE (*expr_p) == TRUTH_OR_EXPR
|| TREE_CODE (*expr_p) == TRUTH_XOR_EXPR);
goto expr_2;
}
recalculate_side_effects (*expr_p);
dont_recalculate:
break;
}
/* If we replaced *expr_p, gimplify again. */
if (ret == GS_OK && (*expr_p == NULL || *expr_p == save_expr))
ret = GS_ALL_DONE;
}
while (ret == GS_OK);
/* If we encountered an error_mark somewhere nested inside, either
stub out the statement or propagate the error back out. */
if (ret == GS_ERROR)
{
if (is_statement)
*expr_p = NULL;
goto out;
}
/* This was only valid as a return value from the langhook, which
we handled. Make sure it doesn't escape from any other context. */
gcc_assert (ret != GS_UNHANDLED);
if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p))
{
/* We aren't looking for a value, and we don't have a valid
statement. If it doesn't have side-effects, throw it away. */
if (!TREE_SIDE_EFFECTS (*expr_p))
*expr_p = NULL;
else if (!TREE_THIS_VOLATILE (*expr_p))
{
/* This is probably a _REF that contains something nested that
has side effects. Recurse through the operands to find it. */
enum tree_code code = TREE_CODE (*expr_p);
switch (code)
{
case COMPONENT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case VIEW_CONVERT_EXPR:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
gimple_test_f, fallback);
break;
default:
/* Anything else with side-effects must be converted to
a valid statement before we get here. */
gcc_unreachable ();
}
*expr_p = NULL;
}
else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p))
&& TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode)
{
/* Historically, the compiler has treated a bare reference
to a non-BLKmode volatile lvalue as forcing a load. */
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p));
/* Normally, we do not want to create a temporary for a
TREE_ADDRESSABLE type because such a type should not be
copied by bitwise-assignment. However, we make an
exception here, as all we are doing here is ensuring that
we read the bytes that make up the type. We use
create_tmp_var_raw because create_tmp_var will abort when
given a TREE_ADDRESSABLE type. */
tree tmp = create_tmp_var_raw (type, "vol");
gimple_add_tmp_var (tmp);
gimplify_assign (tmp, *expr_p, pre_p);
*expr_p = NULL;
}
else
/* We can't do anything useful with a volatile reference to
an incomplete type, so just throw it away. Likewise for
a BLKmode type, since any implicit inner load should
already have been turned into an explicit one by the
gimplification process. */
*expr_p = NULL;
}
/* If we are gimplifying at the statement level, we're done. Tack
everything together and return. */
if (fallback == fb_none || is_statement)
{
/* Since *EXPR_P has been converted into a GIMPLE tuple, clear
it out for GC to reclaim it. */
*expr_p = NULL_TREE;
if (!gimple_seq_empty_p (internal_pre)
|| !gimple_seq_empty_p (internal_post))
{
gimplify_seq_add_seq (&internal_pre, internal_post);
gimplify_seq_add_seq (pre_p, internal_pre);
}
/* The result of gimplifying *EXPR_P is going to be the last few
statements in *PRE_P and *POST_P. Add location information
to all the statements that were added by the gimplification
helpers. */
if (!gimple_seq_empty_p (*pre_p))
annotate_all_with_location_after (*pre_p, pre_last_gsi, input_location);
if (!gimple_seq_empty_p (*post_p))
annotate_all_with_location_after (*post_p, post_last_gsi,
input_location);
goto out;
}
#ifdef ENABLE_GIMPLE_CHECKING
if (*expr_p)
{
enum tree_code code = TREE_CODE (*expr_p);
/* These expressions should already be in gimple IR form. */
gcc_assert (code != MODIFY_EXPR
&& code != ASM_EXPR
&& code != BIND_EXPR
&& code != CATCH_EXPR
&& (code != COND_EXPR || gimplify_ctxp->allow_rhs_cond_expr)
&& code != EH_FILTER_EXPR
&& code != GOTO_EXPR
&& code != LABEL_EXPR
&& code != LOOP_EXPR
&& code != RESX_EXPR
&& code != SWITCH_EXPR
&& code != TRY_FINALLY_EXPR
&& code != OMP_CRITICAL
&& code != OMP_FOR
&& code != OMP_MASTER
&& code != OMP_ORDERED
&& code != OMP_PARALLEL
&& code != OMP_SECTIONS
&& code != OMP_SECTION
&& code != OMP_SINGLE);
}
#endif
/* Otherwise we're gimplifying a subexpression, so the resulting
value is interesting. If it's a valid operand that matches
GIMPLE_TEST_F, we're done. Unless we are handling some
post-effects internally; if that's the case, we need to copy into
a temporary before adding the post-effects to POST_P. */
if (gimple_seq_empty_p (internal_post) && (*gimple_test_f) (*expr_p))
goto out;
/* Otherwise, we need to create a new temporary for the gimplified
expression. */
/* We can't return an lvalue if we have an internal postqueue. The
object the lvalue refers to would (probably) be modified by the
postqueue; we need to copy the value out first, which means an
rvalue. */
if ((fallback & fb_lvalue)
&& gimple_seq_empty_p (internal_post)
&& is_gimple_addressable (*expr_p))
{
/* An lvalue will do. Take the address of the expression, store it
in a temporary, and replace the expression with an INDIRECT_REF of
that temporary. */
tmp = build_fold_addr_expr (*expr_p);
gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue);
*expr_p = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (tmp)), tmp);
}
else if ((fallback & fb_rvalue) && is_gimple_formal_tmp_or_call_rhs (*expr_p))
{
/* An rvalue will do. Assign the gimplified expression into a
new temporary TMP and replace the original expression with
TMP. First, make sure that the expression has a type so that
it can be assigned into a temporary. */
gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p)));
if (!gimple_seq_empty_p (internal_post) || (fallback & fb_lvalue))
/* The postqueue might change the value of the expression between
the initialization and use of the temporary, so we can't use a
formal temp. FIXME do we care? */
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
else
*expr_p = get_formal_tmp_var (*expr_p, pre_p);
if (TREE_CODE (*expr_p) != SSA_NAME)
DECL_GIMPLE_FORMAL_TEMP_P (*expr_p) = 1;
}
else
{
#ifdef ENABLE_GIMPLE_CHECKING
if (!(fallback & fb_mayfail))
{
fprintf (stderr, "gimplification failed:\n");
print_generic_expr (stderr, *expr_p, 0);
debug_tree (*expr_p);
internal_error ("gimplification failed");
}
#endif
gcc_assert (fallback & fb_mayfail);
/* If this is an asm statement, and the user asked for the
impossible, don't die. Fail and let gimplify_asm_expr
issue an error. */
ret = GS_ERROR;
goto out;
}
/* Make sure the temporary matches our predicate. */
gcc_assert ((*gimple_test_f) (*expr_p));
if (!gimple_seq_empty_p (internal_post))
{
annotate_all_with_location (internal_post, input_location);
gimplify_seq_add_seq (pre_p, internal_post);
}
out:
input_location = saved_location;
return ret;
}
/* Look through TYPE for variable-sized objects and gimplify each such
   size that we find.  Add to LIST_P any statements generated.  */
void
gimplify_type_sizes (tree type, gimple_seq *list_p)
{
  tree field, t;

  if (type == NULL || type == error_mark_node)
    return;

  /* We first do the main variant, then copy into any other variants.  */
  type = TYPE_MAIN_VARIANT (type);

  /* Avoid infinite recursion.  */
  if (TYPE_SIZES_GIMPLIFIED (type))
    return;

  TYPE_SIZES_GIMPLIFIED (type) = 1;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
      /* For scalar types the bounds themselves may be variable-sized;
	 gimplify them and propagate the result to every variant.  */
      gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
      gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);
      for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
	{
	  TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
	  TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
	}
      break;

    case ARRAY_TYPE:
      /* These types may not have declarations, so handle them here.  */
      gimplify_type_sizes (TREE_TYPE (type), list_p);
      gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
      /* When not optimizing, ensure VLA bounds aren't removed.  */
      if (!optimize
	  && TYPE_DOMAIN (type)
	  && INTEGRAL_TYPE_P (TYPE_DOMAIN (type)))
	{
	  t = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
	  if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t))
	    DECL_IGNORED_P (t) = 0;
	  t = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
	  if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t))
	    DECL_IGNORED_P (t) = 0;
	}
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Field offsets and sizes may themselves be variable-sized.  */
      for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL)
	  {
	    gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
	    gimplify_one_sizepos (&DECL_SIZE (field), list_p);
	    gimplify_one_sizepos (&DECL_SIZE_UNIT (field), list_p);
	    gimplify_type_sizes (TREE_TYPE (field), list_p);
	  }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      /* We used to recurse on the pointed-to type here, which turned out to
	 be incorrect because its definition might refer to variables not
	 yet initialized at this point if a forward declaration is involved.
	 It was actually useful for anonymous pointed-to types to ensure
	 that the sizes evaluation dominates every possible later use of the
	 values.  Restricting to such types here would be safe since there
	 is no possible forward declaration around, but would introduce an
	 undesirable middle-end semantic to anonymity.  We then defer to
	 front-ends the responsibility of ensuring that the sizes are
	 evaluated both early and late enough, e.g. by attaching artificial
	 type declarations to the tree.  */
      break;

    default:
      break;
    }

  /* Gimplify the size of the type itself (in bits and in bytes) and
     share the gimplified trees with every variant.  */
  gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
  gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);

  for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
    {
      TYPE_SIZE (t) = TYPE_SIZE (type);
      TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
      TYPE_SIZES_GIMPLIFIED (t) = 1;
    }
}
/* A subroutine of gimplify_type_sizes to make sure that *EXPR_P,
   a size or position, has had all of its SAVE_EXPRs evaluated.
   We add any required statements to *STMT_P.  */
void
gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
{
  tree type, expr = *expr_p;

  /* We don't do anything if the value isn't there, is constant, or contains
     A PLACEHOLDER_EXPR.  We also don't want to do anything if it's already
     a VAR_DECL.  If it's a VAR_DECL from another function, the gimplifier
     will want to replace it with a new variable, but that will cause problems
     if this type is from outside the function.  It's OK to have that here.  */
  if (expr == NULL_TREE || TREE_CONSTANT (expr)
      || TREE_CODE (expr) == VAR_DECL
      || CONTAINS_PLACEHOLDER_P (expr))
    return;

  type = TREE_TYPE (expr);
  *expr_p = unshare_expr (expr);

  gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue);
  expr = *expr_p;

  /* Verify that we've an exact type match with the original expression.
     In particular, we do not wish to drop a "sizetype" in favour of a
     type of similar dimensions.  We don't want to pollute the generic
     type-stripping code with this knowledge because it doesn't matter
     for the bulk of GENERIC/GIMPLE.  It only matters that TYPE_SIZE_UNIT
     and friends retain their "sizetype-ness".  */
  if (TREE_TYPE (expr) != type
      && TREE_CODE (type) == INTEGER_TYPE
      && TYPE_IS_SIZETYPE (type))
    {
      tree tmp;
      gimple stmt;

      /* Restore the sizetype by assigning through a NOP_EXPR into a
	 fresh temporary of the original type.  */
      *expr_p = create_tmp_var (type, NULL);
      tmp = build1 (NOP_EXPR, type, expr);
      stmt = gimplify_assign (*expr_p, tmp, stmt_p);
      if (EXPR_HAS_LOCATION (expr))
	gimple_set_location (stmt, *EXPR_LOCUS (expr));
      else
	gimple_set_location (stmt, input_location);
    }
}
/* Gimplify the body of statements pointed to by BODY_P and return a
   GIMPLE_BIND containing the sequence of GIMPLE statements
   corresponding to BODY_P.  FNDECL is the function decl containing
   *BODY_P.  If DO_PARMS is true, also gimplify the parameter setup
   (callee copies) and prepend it to the body.  */
gimple
gimplify_body (tree *body_p, tree fndecl, bool do_parms)
{
  location_t saved_location = input_location;
  gimple_seq parm_stmts, seq;
  gimple outer_bind;
  struct gimplify_ctx gctx;

  timevar_push (TV_TREE_GIMPLIFY);

  /* Initialize for optimize_insn_for_s{ize,peed}_p possibly called during
     gimplification.  */
  default_rtl_profile ();

  gcc_assert (gimplify_ctxp == NULL);
  push_gimplify_context (&gctx);

  /* Unshare most shared trees in the body and in that of any nested functions.
     It would seem we don't have to do this for nested functions because
     they are supposed to be output and then the outer function gimplified
     first, but the g++ front end doesn't always do it that way.  */
  unshare_body (body_p, fndecl);
  unvisit_body (body_p, fndecl);

  /* Make sure input_location isn't set to something weird.  */
  input_location = DECL_SOURCE_LOCATION (fndecl);

  /* Resolve callee-copies.  This has to be done before processing
     the body so that DECL_VALUE_EXPR gets processed correctly.  */
  parm_stmts = (do_parms) ? gimplify_parameters () : NULL;

  /* Gimplify the function's body.  */
  seq = NULL;
  gimplify_stmt (body_p, &seq);
  outer_bind = gimple_seq_first_stmt (seq);
  if (!outer_bind)
    {
      /* An empty body still needs a statement to anchor the bind.  */
      outer_bind = gimple_build_nop ();
      gimplify_seq_add_stmt (&seq, outer_bind);
    }

  /* The body must contain exactly one statement, a GIMPLE_BIND.  If this is
     not the case, wrap everything in a GIMPLE_BIND to make it so.  */
  if (gimple_code (outer_bind) == GIMPLE_BIND
      && gimple_seq_first (seq) == gimple_seq_last (seq))
    ;
  else
    outer_bind = gimple_build_bind (NULL_TREE, seq, NULL);

  *body_p = NULL_TREE;

  /* If we had callee-copies statements, insert them at the beginning
     of the function.  */
  if (!gimple_seq_empty_p (parm_stmts))
    {
      gimplify_seq_add_seq (&parm_stmts, gimple_bind_body (outer_bind));
      gimple_bind_set_body (outer_bind, parm_stmts);
    }

  pop_gimplify_context (outer_bind);
  gcc_assert (gimplify_ctxp == NULL);

#ifdef ENABLE_TYPES_CHECKING
  if (!errorcount && !sorrycount)
    verify_types_in_gimple_seq (gimple_bind_body (outer_bind));
#endif

  timevar_pop (TV_TREE_GIMPLIFY);
  input_location = saved_location;

  return outer_bind;
}
/* Entry point to the gimplification pass.  FNDECL is the FUNCTION_DECL
   node for the function we want to gimplify.

   Returns the sequence of GIMPLE statements corresponding to the body
   of FNDECL.  */
void
gimplify_function_tree (tree fndecl)
{
  tree oldfn, parm, ret;
  gimple_seq seq;
  gimple bind;

  oldfn = current_function_decl;
  current_function_decl = fndecl;
  if (DECL_STRUCT_FUNCTION (fndecl))
    push_cfun (DECL_STRUCT_FUNCTION (fndecl));
  else
    push_struct_function (fndecl);

  for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = TREE_CHAIN (parm))
    {
      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if ((TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE
	   || TREE_CODE (TREE_TYPE (parm)) == VECTOR_TYPE)
	  && !TREE_THIS_VOLATILE (parm)
	  && !needs_to_live_in_memory (parm))
	DECL_GIMPLE_REG_P (parm) = 1;
    }

  /* Likewise for the return value.  */
  ret = DECL_RESULT (fndecl);
  if ((TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE
       || TREE_CODE (TREE_TYPE (ret)) == VECTOR_TYPE)
      && !needs_to_live_in_memory (ret))
    DECL_GIMPLE_REG_P (ret) = 1;

  bind = gimplify_body (&DECL_SAVED_TREE (fndecl), fndecl, true);

  /* The tree body of the function is no longer needed, replace it
     with the new GIMPLE body.  */
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  gimple_set_body (fndecl, seq);

  /* If we're instrumenting function entry/exit, then prepend the call to
     the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
     catch the exit hook.  */
  /* ??? Add some way to ignore exceptions for this TFE.  */
  if (flag_instrument_function_entry_exit
      && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)
      && !flag_instrument_functions_exclude_p (fndecl))
    {
      tree x;
      gimple new_bind;
      gimple tf;
      gimple_seq cleanup = NULL, body = NULL;

      /* BUILT_IN_PROFILE_FUNC_EXIT becomes the "finally" cleanup.  */
      x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_EXIT];
      gimplify_seq_add_stmt (&cleanup, gimple_build_call (x, 0));
      tf = gimple_build_try (seq, cleanup, GIMPLE_TRY_FINALLY);
      /* BUILT_IN_PROFILE_FUNC_ENTER runs before the original body.  */
      x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_ENTER];
      gimplify_seq_add_stmt (&body, gimple_build_call (x, 0));
      gimplify_seq_add_stmt (&body, tf);
      new_bind = gimple_build_bind (NULL, body, gimple_bind_block (bind));
      /* Clear the block for BIND, since it is no longer directly inside
	 the function, but within a try block.  */
      gimple_bind_set_block (bind, NULL);
      /* Replace the current function body with the body
	 wrapped in the try/finally TF.  */
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, new_bind);
      gimple_set_body (fndecl, seq);
    }

  DECL_SAVED_TREE (fndecl) = NULL_TREE;
  current_function_decl = oldfn;
  pop_cfun ();
}
/* Some transformations like inlining may invalidate the GIMPLE form
   for operands.  This function traverses all the operands in STMT and
   gimplifies anything that is not a valid gimple operand.  Any new
   GIMPLE statements are inserted before *GSI_P.  */
void
gimple_regimplify_operands (gimple stmt, gimple_stmt_iterator *gsi_p)
{
  size_t i, num_ops;
  tree orig_lhs = NULL_TREE, lhs, t;
  gimple_seq pre = NULL;
  gimple post_stmt = NULL;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);
  gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      /* Both comparison operands must be gimple values.  */
      gimplify_expr (gimple_cond_lhs_ptr (stmt), &pre, NULL,
		     is_gimple_val, fb_rvalue);
      gimplify_expr (gimple_cond_rhs_ptr (stmt), &pre, NULL,
		     is_gimple_val, fb_rvalue);
      break;
    case GIMPLE_SWITCH:
      gimplify_expr (gimple_switch_index_ptr (stmt), &pre, NULL,
		     is_gimple_val, fb_rvalue);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      gimplify_expr (gimple_omp_atomic_load_rhs_ptr (stmt), &pre, NULL,
		     is_gimple_val, fb_rvalue);
      break;
    case GIMPLE_ASM:
      {
	size_t i, noutputs = gimple_asm_noutputs (stmt);
	const char *constraint, **oconstraints;
	bool allows_mem, allows_reg, is_inout;

	oconstraints
	  = (const char **) alloca ((noutputs) * sizeof (const char *));
	/* Outputs must be lvalues; in-out operands additionally must be
	   minimal lvalues.  */
	for (i = 0; i < noutputs; i++)
	  {
	    tree op = gimple_asm_output_op (stmt, i);
	    constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op)));
	    oconstraints[i] = constraint;
	    parse_output_constraint (&constraint, i, 0, 0, &allows_mem,
				     &allows_reg, &is_inout);
	    gimplify_expr (&TREE_VALUE (op), &pre, NULL,
			   is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			   fb_lvalue | fb_mayfail);
	  }
	for (i = 0; i < gimple_asm_ninputs (stmt); i++)
	  {
	    tree op = gimple_asm_input_op (stmt, i);
	    constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op)));
	    parse_input_constraint (&constraint, 0, 0, noutputs, 0,
				    oconstraints, &allows_mem, &allows_reg);
	    /* An addressable operand whose constraint accepts memory is
	       forced into memory.  */
	    if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (op))) && allows_mem)
	      allows_reg = 0;
	    if (!allows_reg && allows_mem)
	      gimplify_expr (&TREE_VALUE (op), &pre, NULL,
			     is_gimple_lvalue, fb_lvalue | fb_mayfail);
	    else
	      gimplify_expr (&TREE_VALUE (op), &pre, NULL,
			     is_gimple_asm_val, fb_rvalue);
	  }
      }
      break;
    default:
      /* NOTE: We start gimplifying operands from last to first to
	 make sure that side-effects on the RHS of calls, assignments
	 and ASMs are executed before the LHS.  The ordering is not
	 important for other statements.  */
      num_ops = gimple_num_ops (stmt);
      orig_lhs = gimple_get_lhs (stmt);
      for (i = num_ops; i > 0; i--)
	{
	  tree op = gimple_op (stmt, i - 1);
	  if (op == NULL_TREE)
	    continue;
	  if (i == 1 && (is_gimple_call (stmt) || is_gimple_assign (stmt)))
	    gimplify_expr (&op, &pre, NULL, is_gimple_lvalue, fb_lvalue);
	  else if (i == 2
		   && is_gimple_assign (stmt)
		   && num_ops == 2
		   && get_gimple_rhs_class (gimple_expr_code (stmt))
		      == GIMPLE_SINGLE_RHS)
	    gimplify_expr (&op, &pre, NULL,
			   rhs_predicate_for (gimple_assign_lhs (stmt)),
			   fb_rvalue);
	  else if (i == 2 && is_gimple_call (stmt))
	    {
	      /* The callee slot: a bare FUNCTION_DECL is already valid.  */
	      if (TREE_CODE (op) == FUNCTION_DECL)
		continue;
	      gimplify_expr (&op, &pre, NULL, is_gimple_call_addr, fb_rvalue);
	    }
	  else
	    gimplify_expr (&op, &pre, NULL, is_gimple_val, fb_rvalue);
	  gimple_set_op (stmt, i - 1, op);
	}

      lhs = gimple_get_lhs (stmt);
      /* If the LHS changed it in a way that requires a simple RHS,
	 create temporary.  */
      if (lhs && !is_gimple_formal_tmp_var (lhs))
	{
	  bool need_temp = false;

	  if (is_gimple_assign (stmt)
	      && num_ops == 2
	      && get_gimple_rhs_class (gimple_expr_code (stmt))
		 == GIMPLE_SINGLE_RHS)
	    gimplify_expr (gimple_assign_rhs1_ptr (stmt), &pre, NULL,
			   rhs_predicate_for (gimple_assign_lhs (stmt)),
			   fb_rvalue);
	  else if (is_gimple_reg (lhs))
	    {
	      if (is_gimple_reg_type (TREE_TYPE (lhs)))
		{
		  if (is_gimple_call (stmt))
		    {
		      i = gimple_call_flags (stmt);
		      /* A temp is needed unless the call is ECF_CONST or
			 ECF_PURE and not ECF_LOOPING_CONST_OR_PURE.  */
		      if ((i & ECF_LOOPING_CONST_OR_PURE)
			  || !(i & (ECF_CONST | ECF_PURE)))
			need_temp = true;
		    }
		  if (stmt_can_throw_internal (stmt))
		    need_temp = true;
		}
	    }
	  else
	    {
	      if (is_gimple_reg_type (TREE_TYPE (lhs)))
		need_temp = true;
	      else if (TYPE_MODE (TREE_TYPE (lhs)) != BLKmode)
		{
		  if (is_gimple_call (stmt))
		    {
		      tree fndecl = gimple_call_fndecl (stmt);

		      if (!aggregate_value_p (TREE_TYPE (lhs), fndecl)
			  && !(fndecl && DECL_RESULT (fndecl)
			       && DECL_BY_REFERENCE (DECL_RESULT (fndecl))))
			need_temp = true;
		    }
		  else
		    need_temp = true;
		}
	    }
	  if (need_temp)
	    {
	      tree temp = create_tmp_var (TREE_TYPE (lhs), NULL);

	      DECL_GIMPLE_FORMAL_TEMP_P (temp) = 1;
	      if (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE
		  || TREE_CODE (TREE_TYPE (lhs)) == VECTOR_TYPE)
		DECL_GIMPLE_REG_P (temp) = 1;
	      if (TREE_CODE (orig_lhs) == SSA_NAME)
		orig_lhs = SSA_NAME_VAR (orig_lhs);
	      /* A temp standing in for a restrict-based variable inherits
		 the restrict base.  */
	      if (TREE_CODE (orig_lhs) == VAR_DECL
		  && DECL_BASED_ON_RESTRICT_P (orig_lhs))
		{
		  DECL_BASED_ON_RESTRICT_P (temp) = 1;
		  SET_DECL_RESTRICT_BASE (temp,
					  DECL_GET_RESTRICT_BASE (orig_lhs));
		}
	      if (gimple_in_ssa_p (cfun))
		temp = make_ssa_name (temp, NULL);
	      gimple_set_lhs (stmt, temp);
	      /* Copy the temp back to the original LHS after STMT.  */
	      post_stmt = gimple_build_assign (lhs, temp);
	      if (TREE_CODE (lhs) == SSA_NAME)
		SSA_NAME_DEF_STMT (lhs) = post_stmt;
	    }
	}
      break;
    }

  /* Register any new temporaries with the referenced-vars machinery.  */
  if (gimple_referenced_vars (cfun))
    for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t))
      add_referenced_var (t);

  if (!gimple_seq_empty_p (pre))
    {
      if (gimple_in_ssa_p (cfun))
	{
	  gimple_stmt_iterator i;

	  for (i = gsi_start (pre); !gsi_end_p (i); gsi_next (&i))
	    mark_symbols_for_renaming (gsi_stmt (i));
	}
      gsi_insert_seq_before (gsi_p, pre, GSI_SAME_STMT);
    }
  if (post_stmt)
    gsi_insert_after (gsi_p, post_stmt, GSI_NEW_STMT);

  pop_gimplify_context (NULL);
}
/* Expands EXPR to list of gimple statements STMTS.  If SIMPLE is true,
   force the result to be either ssa_name or an invariant, otherwise
   just force it to be a rhs expression.  If VAR is not NULL, make the
   base variable of the final destination be VAR if suitable.  */
tree
force_gimple_operand (tree expr, gimple_seq *stmts, bool simple, tree var)
{
  tree t;
  enum gimplify_status ret;
  gimple_predicate gimple_test_f;
  struct gimplify_ctx gctx;

  *stmts = NULL;

  /* Already a valid gimple value: no statements needed.  */
  if (is_gimple_val (expr))
    return expr;

  gimple_test_f = simple ? is_gimple_val : is_gimple_reg_rhs;

  push_gimplify_context (&gctx);
  gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun);
  gimplify_ctxp->allow_rhs_cond_expr = true;

  if (var)
    expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr);

  if (TREE_CODE (expr) != MODIFY_EXPR
      && TREE_TYPE (expr) == void_type_node)
    {
      /* A void expression has only side effects; emit it as statements
	 and return no value.  */
      gimplify_and_add (expr, stmts);
      expr = NULL_TREE;
    }
  else
    {
      ret = gimplify_expr (&expr, stmts, NULL, gimple_test_f, fb_rvalue);
      gcc_assert (ret != GS_ERROR);
    }

  /* Register any new temporaries with the referenced-vars machinery.  */
  if (gimple_referenced_vars (cfun))
    for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t))
      add_referenced_var (t);

  pop_gimplify_context (NULL);

  return expr;
}
/* Invokes force_gimple_operand for EXPR with parameters SIMPLE_P and VAR.
   If some statements are produced, emits them at GSI.  If BEFORE is true,
   the statements are inserted before GSI, otherwise they are inserted after
   it.  M specifies the way GSI moves after insertion (GSI_SAME_STMT or
   GSI_CONTINUE_LINKING are the usual values).  */
tree
force_gimple_operand_gsi (gimple_stmt_iterator *gsi, tree expr,
			  bool simple_p, tree var, bool before,
			  enum gsi_iterator_update m)
{
  gimple_seq stmts;

  expr = force_gimple_operand (expr, &stmts, simple_p, var);

  if (!gimple_seq_empty_p (stmts))
    {
      /* New statements may introduce new symbols; flag them for SSA
	 renaming when operating on SSA form.  */
      if (gimple_in_ssa_p (cfun))
	{
	  gimple_stmt_iterator i;

	  for (i = gsi_start (stmts); !gsi_end_p (i); gsi_next (&i))
	    mark_symbols_for_renaming (gsi_stmt (i));
	}

      if (before)
	gsi_insert_seq_before (gsi, stmts, m);
      else
	gsi_insert_seq_after (gsi, stmts, m);
    }

  return expr;
}
#include "gt-gimplify.h"
|
rkb_screen.c | /*
*
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "fblas.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define LL 0
#define SS 1
#define SL 2
#define LS 3
int int2e_spinor();
int int2e_spsp1spsp2_spinor();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/* Schwarz-type prescreen for the (LL|LL) spinor ERI class.  Returns
 * nonzero when shell quartet (i,j|k,l) may contribute above
 * opt->direct_scf_cutoff given the current density bounds in dm_cond. */
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1;       /* screening disabled */
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        /* Largest density bound that can couple to this quartet. */
        double dmax = opt->dm_cond[j*n+i];
        if (opt->dm_cond[l*n+k] > dmax) dmax = opt->dm_cond[l*n+k];
        if (opt->dm_cond[j*n+k] > dmax) dmax = opt->dm_cond[j*n+k];
        if (opt->dm_cond[j*n+l] > dmax) dmax = opt->dm_cond[j*n+l];
        if (opt->dm_cond[i*n+k] > dmax) dmax = opt->dm_cond[i*n+k];
        if (opt->dm_cond[i*n+l] > dmax) dmax = opt->dm_cond[i*n+l];
        return dmax > opt->direct_scf_cutoff / qijkl;
}
/* Distribute per-density screening tables for the (LL|LL) class and
 * report the smallest density value worth keeping for this quartet. */
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        int nbas = opt->nbas;
        double qijkl = opt->q_cond[shls[0]*nbas+shls[1]]
                     * opt->q_cond[shls[2]*nbas+shls[3]];
        /* Per-density bounds start right after the global bound block. */
        double *pdmscond = opt->dm_cond + nbas*nbas;
        int idm;
        for (idm = 0; idm < n_dm/2; idm++) {
                /* J and K share the same DM (see _vhf.rdirect_mapdm). */
                double *cond = pdmscond + idm*nbas*nbas;
                dms_cond[idm*2+0] = cond;       /* for vj */
                dms_cond[idm*2+1] = cond;       /* for vk */
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}
/* Schwarz-type prescreen for the mixed (SS|LL) class.  The SS Schwarz
 * bounds live in the second nbas*nbas block of q_cond; the SL density
 * bounds in the SL block of dm_cond. */
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1;       /* screening disabled */
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double qijkl = opt->q_cond[n*n*SS+i*n+j] * opt->q_cond[k*n+l];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        double *dmsl = opt->dm_cond + n*n*SL;
        /* Largest density bound that can couple to this quartet. */
        double dmax = opt->dm_cond[n*n*SS+j*n+i];
        if (opt->dm_cond[l*n+k] > dmax) dmax = opt->dm_cond[l*n+k];
        if (dmsl[j*n+k] > dmax) dmax = dmsl[j*n+k];
        if (dmsl[j*n+l] > dmax) dmax = dmsl[j*n+l];
        if (dmsl[i*n+k] > dmax) dmax = dmsl[i*n+k];
        if (dmsl[i*n+l] > dmax) dmax = dmsl[i*n+l];
        return dmax > opt->direct_scf_cutoff / qijkl;
}
// be careful with the order in dms_cond, the current order (dmll, dmss, dmsl)
// is consistent to the function _call_veff_ssll in dhf.py
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        int nbas = opt->nbas;
        int nn = nbas * nbas;
        double qijkl = opt->q_cond[nn*SS+shls[0]*nbas+shls[1]]
                     * opt->q_cond[shls[2]*nbas+shls[3]];
        /* Per-density bounds start after the 4 global bound blocks. */
        double *pdmscond = opt->dm_cond + 4*nn;
        int nset = n_dm / 3;
        int idm;
        for (idm = 0; idm < nset; idm++) {
                dms_cond[nset*0+idm] = pdmscond + nn*(nset*LL + idm);
                dms_cond[nset*1+idm] = pdmscond + nn*(nset*SS + idm);
                dms_cond[nset*2+idm] = pdmscond + nn*(nset*SL + idm);
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}
/* Build the Schwarz screening bounds used by the CVHFrkb*_prescreen
   functions: qcond[ish*nbas+jsh] = sqrt(max_{i,j} |(ij|ij)|) over the
   functions of shell pair (ish,jsh).  Only the lower triangle of shell
   pairs is computed; the result is mirrored since the bound is symmetric. */
static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond,
                      int *ao_loc, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        int shls_slice[] = {0, nbas};
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
/* cache_size is const-qualified, hence predetermined shared under
 * default(none) and need not be listed explicitly. */
#pragma omp parallel default(none) \
        shared(intor, cintopt, qcond, ao_loc, atm, natm, bas, nbas, env)
{
        double qtmp, tmp;
        int i, j, ij, di, dj, ish, jsh;
        int shls[4];
        double *cache = malloc(sizeof(double) * cache_size);
        /* Size the integral buffer by the largest shell dimension so it
           can hold any (ij|ij) quartet. */
        di = 0;
        for (ish = 0; ish < nbas; ish++) {
                dj = ao_loc[ish+1] - ao_loc[ish];
                di = MAX(di, dj);
        }
        double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* Invert the triangular index: ij = ish*(ish+1)/2 + jsh. */
                ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                shls[0] = ish;
                shls[1] = jsh;
                shls[2] = ish;
                shls[3] = jsh;
                /* Never store an exact zero so callers may divide by the
                   bound without a special case. */
                qtmp = 1e-100;
                if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
                        /* Scan the "diagonal" elements (ij|ij) of the block. */
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                        qtmp = sqrt(qtmp);
                }
                qcond[ish*nbas+jsh] = qtmp;
                qcond[jsh*nbas+ish] = qtmp;
        }
        free(buf);
        free(cache);
}
}
/* (Re)build the Schwarz bound table opt->q_cond for the (LL|LL) class.
 * Any table left over from a previous call is released first. */
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        free(opt->q_cond);      /* free(NULL) is a no-op */
        opt->q_cond = malloc(sizeof(double) * nbas*nbas);
        /* Only the plain spinor ERI kernel is supported here. */
        assert(intor == &int2e_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
/* (Re)build the Schwarz bound table opt->q_cond for the (SS|SS) class
 * and scale it by .25/c^2 (c = speed of light from env). */
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        free(opt->q_cond);      /* free(NULL) is a no-op */
        opt->q_cond = malloc(sizeof(double) * nbas*nbas);
        /* Only the (sigma.p sigma.p) kernel is supported here. */
        assert(intor == &int2e_spsp1spsp2_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
        const int inc1 = 1;
        int nn = nbas * nbas;
        double c1 = .25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]);
        dscal_(&nn, &c1, opt->q_cond, &inc1);
}
/* (Re)build the Schwarz bound tables for the mixed (SS|LL) class:
 * two stacked nbas*nbas blocks, LL bounds first, then scaled SS bounds. */
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        free(opt->q_cond);      /* free(NULL) is a no-op */
        opt->q_cond = malloc(sizeof(double) * nbas*nbas * 2);
        double *qcond_ss = opt->q_cond + nbas*nbas;
        set_qcond(&int2e_spinor, NULL, opt->q_cond,
                  ao_loc, atm, natm, bas, nbas, env);
        set_qcond(&int2e_spsp1spsp2_spinor, NULL, qcond_ss,
                  ao_loc, atm, natm, bas, nbas, env);
        const int inc1 = 1;
        int nn = nbas * nbas;
        double c1 = 1/(.25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]));
        dscal_(&nn, &c1, qcond_ss, &inc1);
}
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
double direct_scf_cutoff, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int nao = ao_loc[nbas];
double dmax, dmaxi, tmp;
int i, j, ish, jsh;
int iset;
double complex *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh < nbas; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
dmaxi = 0;
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
tmp = cabs(pdm[i*nao+j]);
dmaxi = MAX(dmaxi, tmp);
} }
dmscond[iset*nbas*nbas+ish*nbas+jsh] = dmaxi;
dmax = MAX(dmax, dmaxi);
}
dmcond[ish*nbas+jsh] = dmax;
} }
}
// dm_cond ~ 1+nset, dm_cond + dms_cond
/* Build the density screening table for the (LL|LL) class: one global
 * nbas*nbas max block followed by nset per-density blocks. */
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond) { // NOT reuse opt->dm_cond because nset may be diff in different call
                free(opt->dm_cond);
        }
        /* calloc zero-initializes and checks the count*size product for
         * overflow, replacing the previous malloc+memset pair. */
        opt->dm_cond = (double *)calloc((size_t)nbas*nbas*(1+nset),
                                        sizeof(double));
        assert(opt->dm_cond != NULL);
        // dmcond followed by dmscond which are max matrix element for each dm
        set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
/* Build the density screening table for the (SS|SS) class; layout is
 * identical to CVHFrkbllll_direct_scf_dm (global block + nset blocks). */
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond) { /* reallocate: nset may differ between calls */
                free(opt->dm_cond);
        }
        /* calloc zero-initializes and checks the count*size product for
         * overflow, replacing the previous malloc+memset pair. */
        opt->dm_cond = (double *)calloc((size_t)nbas*nbas*(1+nset),
                                        sizeof(double));
        assert(opt->dm_cond != NULL);
        set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
// the current order of dmscond (dmll, dmss, dmsl) is consistent to the
// function _call_veff_ssll in dhf.py
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond) { /* reallocate: nset may differ between calls */
                free(opt->dm_cond);
        }
        /* dm holds nset densities for each of the LL, SS and SL blocks. */
        nset = nset / 3;
        /* calloc zero-initializes and checks the count*size product for
         * overflow, replacing the previous malloc+memset pair. */
        opt->dm_cond = (double *)calloc((size_t)nbas*nbas*4*(1+nset),
                                        sizeof(double));
        assert(opt->dm_cond != NULL);
        // 4 types of dmcond (LL,SS,SL,LS) followed by 4 types of dmscond
        int n2c = CINTtot_cgto_spinor(bas, nbas);
        double *dmcondll = opt->dm_cond + nbas*nbas*LL;
        double *dmcondss = opt->dm_cond + nbas*nbas*SS;
        double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
        //double *dmcondls = opt->dm_cond + nbas*nbas*LS;
        double *pdmscond = opt->dm_cond + nbas*nbas*4;
        double *dmscondll = pdmscond + nset*nbas*nbas*LL;
        double *dmscondss = pdmscond + nset*nbas*nbas*SS;
        double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
        //double *dmscondls = pdmscond + nset*nbas*nbas*LS;
        double complex *dmll = dm + n2c*n2c*LL*nset;
        double complex *dmss = dm + n2c*n2c*SS*nset;
        double complex *dmsl = dm + n2c*n2c*SL*nset;
        //double complex *dmls = dm + n2c*n2c*LS*nset;
        set_dmcond(dmcondll, dmscondll, dmll,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
        set_dmcond(dmcondss, dmscondss, dmss,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
        set_dmcond(dmcondsl, dmscondsl, dmsl,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
|
linalg.c | /*
* linalg.c
* Francesco Conti <f.conti@unibo.it>
*
* Copyright (C) 2015 ETH Zurich, University of Bologna
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#include "linalg.h"
#ifdef IPC_CONV16
#include "perf_monitor.h"
#elif IPC_CONV16_INNER
#include "perf_monitor.h"
#endif
#ifdef CCN_HWCE_ACCEL
#include "hwce.h"
#endif
#ifndef PULP_CHIP
#define PULP_CHIP -1
#endif
unsigned int hwce_iter = 0;
/**
* @brief Various versions of the 2d convolution.
*
* Computes the 2d convolution y=W*x (inlined). The baseline
* "algorithmic" version uses nested loops over the output pixel (i,j)
* and the filter tap (ui,uj).
*
* @param *W
* a pointer to the base of the 4d weight tensor. Weights are organized
* in a W[a,b,i,j] fashion, where a is the index of the output feature
* map, b is the index of the input feature map, and (i,j) are the
* coordinates of a pixel in the filter. Data layout on memory is
* row-major. The __restrict__ keyword is used to indicate the compiler
* that W, x, y do not overlap.
* @param *x
* a pointer to the base of the input 3d tensor (a collection of feature
* maps). The inputs are organized in a x[b,i,j] fashion, where b is the
* index of the input feature map, and (i,j) are the coordinates of a
* pixel in the input feature map. The __restrict__ keyword is used to
* indicate the compiler that W, x, y do not overlap.
* @param *y
* a pointer to the base of the output 3d tensor (a collection of feature
* maps. The outputs are organized in a y[a,i,j] fashion, where a is the
* index of the output feature map, and (i,j) are the coordinates of a
* pixel in the output feature map. The __restrict__ keyword is used to
* indicate the compiler that W, x, y do not overlap.
* @param h
* the height of the input feature maps.
* @param w
* the width of the input feature maps.
* @param fs
* the size of the filters.
* @param oh
* the height of the output feature maps.
* @param ow
* the width of the output feature maps.
* @param nif
* the number of input feature maps.
* @param a
* the index of the output feature map.
* @param b
* the index of the input feature map.
*/
#ifdef SUPERDETAILED_DEBUG
static int64_t bigconv = 0;
#endif /* SUPERDETAILED_DEBUG */
#ifdef CCN_PULP_HWCE
/*
 * 11x11 convolution on the HWCE, decomposed into a 3x3 grid of 5x5
 * accelerator passes, each shifted by 6 pixels in x/y; all passes
 * accumulate into the same output buffer. The first eight passes run
 * synchronously, the last is issued asynchronously and its job id is
 * returned so the caller can hwce_wait() on it. Each weight chunk is
 * nif*28 halfwords.
 */
int32_t conv16_hwce_11x11_async(
    int16_t * __restrict__ W_base,
    int16_t * __restrict__ x_base,
    int16_t * __restrict__ y_ptr,
    int h,
    int w,
    int nif,
    int a,
    unsigned qf
) {
    int t;
    for(t=0; t<8; t++) {
        int row = t / 3;
        int col = t % 3;
        hwce_conv_5x5_16x16(W_base + t*nif*28, x_base + row*6*w + col*6,
                            y_ptr, h-6, w-6, nif, a, qf);
    }
    /* bottom-right tile goes out asynchronously */
    return hwce_conv_5x5_16x16_async(W_base + 8*nif*28, x_base + 12*w + 12,
                                     y_ptr, h-6, w-6, nif, a, qf);
}
/*
 * 7x7 convolution on the HWCE, decomposed into a 2x2 grid of 5x5 passes
 * shifted by 5 pixels; three run synchronously, the last asynchronously
 * (its job id is returned for hwce_wait()). Weight chunks are nif*28
 * halfwords apart.
 */
int32_t conv16_hwce_7x7_async(
    int16_t * __restrict__ W_base,
    int16_t * __restrict__ x_base,
    int16_t * __restrict__ y_ptr,
    int h,
    int w,
    int nif,
    int a,
    unsigned qf
) {
    int t;
    for(t=0; t<3; t++) {
        hwce_conv_5x5_16x16(W_base + t*nif*28, x_base + (t/2)*5*w + (t%2)*5,
                            y_ptr, h-2, w-2, nif, a, qf);
    }
    return hwce_conv_5x5_16x16_async(W_base + 3*nif*28, x_base + 5*w + 5,
                                     y_ptr, h-2, w-2, nif, a, qf);
}
/*
 * 5x5 convolution: maps 1:1 onto a single asynchronous HWCE pass.
 * Returns the accelerator job id for a later hwce_wait().
 */
int32_t conv16_hwce_5x5_async(
    int16_t * __restrict__ W_base,
    int16_t * __restrict__ x_base,
    int16_t * __restrict__ y_ptr,
    int h,
    int w,
    int nif,
    int a,
    unsigned qf
) {
    return hwce_conv_5x5_16x16_async(W_base, x_base, y_ptr, h, w, nif, a, qf);
}
/*
 * 3x3 convolution run as a single 5x5 HWCE pass: the input window is
 * widened by one pixel on each side (x_base - w - 1, h+2, w+2) so the
 * 5x5 engine produces the same output extent. Returns the async job id.
 */
int32_t conv16_hwce_3x3_async(
    int16_t * __restrict__ W_base,
    int16_t * __restrict__ x_base,
    int16_t * __restrict__ y_ptr,
    int h,
    int w,
    int nif,
    int a,
    unsigned qf
) {
    return hwce_conv_5x5_16x16_async(W_base, x_base - w - 1, y_ptr,
                                     h + 2, w + 2, nif, a, qf);
}
#endif /* CCN_PULP_HWCE */
/*
 * Inner product for one output pixel (i,j) of a fs x fs convolution.
 * Weights are read forward (row-major); the input is read backwards from
 * x_base[i*w + j] (flipped-kernel convolution indexing). The 32-bit
 * accumulator is rescaled by qf fractional bits before returning.
 * y_ptr and ow are unused here; they are kept for signature compatibility
 * with the callers.
 */
static inline int32_t conv16_unrolled_ptr_loopbody(
    int16_t * __restrict__ W_base,
    int16_t * __restrict__ x_base,
    int16_t * __restrict__ y_ptr,
    int w,
    int ow,
    int fs,
    int i,
    int j,
    unsigned qf
) {
    int32_t acc = 0;
    int kk, ll;
    for(kk=0; kk<fs; kk++) {
        const int16_t *w_row = W_base + kk*fs;
        const int16_t *x_row = x_base + (i-kk)*w + j;
        for(ll=0; ll<fs; ll++) {
            int16_t w_cur = w_row[ll];
            int16_t x_cur = x_row[-ll];
#ifdef SUPERDETAILED_DEBUG
#define sign_ext_4(x) \
    (int32_t) ((x >> 3) ? (0xfffffff0 | x) : (0x00000000 | x))
            int16_t w0 = w_cur & 0x000f;
            int16_t w1 = (w_cur & 0x00f0) >> 4;
            int16_t w2 = (w_cur & 0x0f00) >> 8;
            int16_t w3 = (w_cur & 0xf000) >> 12;
            int32_t temp3 = sign_ext_4(w3) * (int32_t)x_cur;
            int32_t temp2 = w2 * (int32_t)x_cur;
            int32_t temp1 = w1 * (int32_t)x_cur;
            int32_t temp0 = w0 * (int32_t)x_cur;
            printf(" (%d,%d): (%08x,%08x,%08x,%08x) = %04x * %04x\n", kk, ll, temp3, temp2, temp1, temp0, w_cur & 0xffff, x_cur & 0xffff);
#endif /* SUPERDETAILED_DEBUG */
            acc += w_cur * x_cur;
        }
    }
#ifdef SUPERDETAILED_DEBUG
    bigconv = acc;
#endif
    acc >>= qf;
    return acc;
}
/*
 * Accumulate a fs x fs convolution into one output feature map, one pixel
 * at a time, delegating the inner product to conv16_unrolled_ptr_loopbody.
 * PARALLEL_PIXEL mode updates y directly (saturating to int16 when
 * FULL_PRECISION is enabled); any other mode serializes the update with an
 * OpenMP critical section.
 *
 * BUG FIX: in the non-PARALLEL_PIXEL branch the loop-body result was
 * computed but never assigned to `conv`, so a stale value was accumulated
 * into y; the result is now stored before the update.
 */
static inline void conv16_unrolled_ptr(
    int16_t *__restrict__ W_base,
    int16_t *__restrict__ x_base,
    int16_t *__restrict__ y_ptr,
    int w,
    int oh,
    int ow,
    int fs,
    int parallel_type,
    unsigned qf
) {
    register int i;
    register int j;
#ifdef FULL_PRECISION
    int conv = 0;
#else
    int16_t conv = 0;
#endif
#ifdef IPC_CONV16
    /* NOTE(review): these counters are set up but never read here */
    unsigned int cc=0, sc=0, ic=0, dc=0;
    perf_start();
    perf_clear();
#elif IPC_CONV16_INNER
    unsigned int cc=0, sc=0, ic=0, dc=0;
#endif
    if(parallel_type == PARALLEL_PIXEL) {
#ifndef SUPERDETAILED_DEBUG
// #pragma omp for nowait
#else
#pragma omp master
#endif /* ~SUPERDETAILED_DEBUG */
        for (i=0; i<oh; i++) {
            for (j=0; j<ow; j++) {
                conv = conv16_unrolled_ptr_loopbody(W_base, x_base, y_ptr, w, ow, fs, i, j, qf);
#ifdef SUPERDETAILED_DEBUG
                printf("(%d,%d): %08x = %08x + %08x\n", i, j, (*(y_ptr + i*ow+j)+conv), (*(y_ptr + i*ow+j)), bigconv);
#endif /* SUPERDETAILED_DEBUG */
#ifdef FULL_PRECISION
#ifndef DONT_SATURATE
                /* accumulate at full precision, then saturate to int16 */
                conv += *(y_ptr + i*ow+j);
                if(conv > +32767) {
#ifdef SUPERDETAILED_DEBUG
                    printf("SAT+! conv=%08x\n", conv);
#endif /* SUPERDETAILED_DEBUG */
                    conv = 0x00007fff;
                }
                else if(conv < -32768) {
#ifdef SUPERDETAILED_DEBUG
                    printf("SAT-! conv=%08x\n", conv);
#endif /* SUPERDETAILED_DEBUG */
                    conv = 0xffff8000;
                }
                *(y_ptr + i*ow+j) = conv;
#else /* DONT_SATURATE */
                *(y_ptr + i*ow+j) += conv;
#endif /* DONT_SATURATE */
#else /* ~FULL_PRECISION */
                *(y_ptr + i*ow+j) += conv;
#endif /* ~FULL_PRECISION */
            }
        }
    }
    else {
        for (i=0; i<oh; i++) {
            for (j=0; j<ow; j++) {
                /* BUG FIX: this return value used to be discarded */
                conv = conv16_unrolled_ptr_loopbody(W_base, x_base, y_ptr, w, ow, fs, i, j, qf);
#ifdef SUPERDETAILED_DEBUG
                printf("(%d,%d): %08x = %08x + %08x\n", i, j, (*(y_ptr + i*ow+j)+conv), (*(y_ptr + i*ow+j)), bigconv);
#endif /* SUPERDETAILED_DEBUG */
                /* NOTE(review): this branch skips the FULL_PRECISION
                   saturation above — confirm that is intended */
#pragma omp critical
                *(y_ptr + i*ow+j) += conv;
            }
        }
    }
}
/*
 * Read-modify-write of up to two adjacent output pixels: adds conv[0] to
 * y[i][j] and, when a right neighbour exists, conv[1] to y[i][j+1].
 * Arithmetic wraps in int16, matching the rest of the fixed-point pipeline.
 *
 * BUG FIX: the old code loaded y[i][j+1] unconditionally and only guarded
 * the store, reading one element past the row end when j was the last
 * column of an odd-width map; both the load and the store are now guarded.
 * (The commented-out single-ld/st variant and its unused helper were
 * removed.)
 */
static inline void conv16_approx_5x5_critical(
    int16_t * __restrict__ y_ptr,
    int16_t * __restrict__ conv,
    int ow,
    int i,
    int j
) {
    int16_t y0 = *(y_ptr + i*ow + j);
    y0 += conv[0];
    if(ow - j > 1) {
        int16_t y1 = *(y_ptr + i*ow + j + 1);
        y1 += conv[1];
        *(y_ptr + i*ow + j) = y0;
        *(y_ptr + i*ow + j + 1) = y1;
    }
    else {
        /* last column of an odd-width row: no partner pixel */
        *(y_ptr + i*ow + j) = y0;
    }
}
/*
 * 5x5 convolution inner kernel producing TWO horizontally adjacent output
 * pixels per call, accumulated into conv[0] and conv[1]. Each partial
 * product is rescaled by QF fractional bits before accumulation (QF comes
 * from linalg.h). ow is unused; kept for signature parity with the other
 * loop bodies.
 *
 * NOTE(review): two int16 input samples are fetched with one 32-bit load
 * ((unsigned *)x_ptr) and split via ((int16_t *)&x_loc)[0/1]; which lane
 * maps to which pixel, and whether the unaligned load is legal, depend on
 * the target's endianness and alignment rules — confirm for the PULP core.
 * x_ptr walks backwards (x_ptr--), the flipped-kernel convolution indexing
 * used throughout this file.
 */
static inline void conv16_optimized_5x5_loopbody(
    int16_t * __restrict__ W_base,
    int16_t * __restrict__ x_base,
    int16_t * __restrict__ conv,
    int w,
    int ow,
    int i,
    int j
) {
    int k,l;
    int16_t *W_ptr = W_base;
    int16_t *x_ptr = x_base + i*w + j;
    for(k=0; k<5; k++) {
        for(l=0; l<5; l++) {
            register int16_t W_loc = *W_ptr++;
            // unsigned x_loc = *((unsigned *) (x_ptr-1));
            unsigned x_loc = *((unsigned *) x_ptr);
            x_ptr--;
            conv[0] += (W_loc * ((int16_t *) &x_loc)[1]) >> QF;
            conv[1] += (W_loc * ((int16_t *) &x_loc)[0]) >> QF;
        }
        /* retreat to the same starting column of the previous input row */
        x_ptr -= w-5;
    }
}
/*
 * Vectorized 5x5 inner kernel computing two adjacent output pixels per
 * call using the vect16_t/vect32_t SIMD types from linalg.h. Partial sums
 * accumulate at 32-bit precision in conv_big and are right-shifted by QF
 * bits only once at the end.
 *
 * NOTE(review): *(vect16_t *)x_ptr loads two int16 lanes at once; lane
 * order and alignment requirements are target-dependent — confirm on the
 * PULP core. ow is unused; kept for signature parity.
 */
static inline void conv16_approx_5x5_loopbody(
    int16_t * __restrict__ W_base,
    int16_t * __restrict__ x_base,
    int16_t * __restrict__ conv,
    int w,
    int ow,
    int i,
    int j
) {
    int k,l;
    int16_t *W_ptr = W_base;
    /* input is read backwards from (i,j): flipped-kernel indexing */
    int16_t *x_ptr = x_base + i*w + j;
    register vect32_t conv_big = {0,0};
    for(k=0; k<5; k++) {
        for(l=0; l<5; l++) {
            register int16_t W_loc = *W_ptr++;
            register vect16_t x_loc = *((vect16_t *) x_ptr--);
            conv_big[0] += W_loc * x_loc[0];
            conv_big[1] += W_loc * x_loc[1];
        }
        /* retreat to the previous input row */
        x_ptr -= w-5;
    }
    conv[0] = conv_big[0] >> QF;
    conv[1] = conv_big[1] >> QF;
}
/*
 * 5x5 convolution over a full output map, two pixels per iteration via
 * conv16_approx_5x5_loopbody. PARALLEL_PIXEL mode updates y directly;
 * any other mode wraps the read-modify-write in an OpenMP critical
 * section.
 *
 * BUG FIX: y[i][j+1] was loaded before the `ow-j > 1` guard in both
 * branches, reading past the row end on odd-width maps; the load is now
 * guarded along with the store. Unused locals (k, l, y_in_big) removed.
 */
static inline void conv16_approx_5x5(
    int16_t *__restrict__ W_base,
    int16_t *__restrict__ x_base,
    int16_t *__restrict__ y_ptr,
    int w,
    int oh,
    int ow,
    int parallel_type
) {
    register int i,j;
    int16_t conv[2] = {0,0};
    if(parallel_type == PARALLEL_PIXEL) {
        // #pragma omp for nowait
        for (i=0; i<oh; i++) {
            for (j=0; j<ow; j+=2) {
                conv16_approx_5x5_loopbody(W_base, x_base, conv, w, ow, i, j);
                {
                    int16_t y0 = *(y_ptr + i*ow+j);
                    y0 += conv[0];
                    if(ow-j > 1) {
                        int16_t y1 = *(y_ptr + i*ow+j+1);
                        y1 += conv[1];
                        *(y_ptr + i*ow+j) = y0;
                        *(y_ptr + i*ow+j + 1) = y1;
                    }
                    else {
                        /* odd ow: last column has no partner pixel */
                        *(y_ptr + i*ow+j) = y0;
                    }
                }
            }
        }
    }
    else {
        for (i=0; i<oh; i++) {
            for (j=0; j<ow; j+=2) {
                conv16_approx_5x5_loopbody(W_base, x_base, conv, w, ow, i, j);
                /* y is shared between workers in this mode */
#pragma omp critical
                {
                    int16_t y0 = *(y_ptr + i*ow+j);
                    y0 += conv[0];
                    if(ow-j > 1) {
                        int16_t y1 = *(y_ptr + i*ow+j+1);
                        y1 += conv[1];
                        *(y_ptr + i*ow+j) = y0;
                        *(y_ptr + i*ow+j + 1) = y1;
                    }
                    else {
                        *(y_ptr + i*ow+j) = y0;
                    }
                }
            }
        }
    }
}
/*
 * Reference ("golden") 2D convolution of input map b into output map a,
 * using explicit 4D/3D tensor indexing (W[a][b][ui][uj], x[b][m][n],
 * y[a][i][j]) and the flipped-kernel offsets m = i-ui+fs-1, n = j-uj+fs-1.
 * Products go through ccn_mult (fixed-point multiply from linalg.h) and
 * accumulate in an int16, matching the optimized kernels.
 */
static inline void conv16_gold(int16_t *__restrict__ W, int16_t *__restrict__ x, int16_t *__restrict__ y, int h, int w, int fs, int oh, int ow, int nif, int a, int b) {
    int i, j, ui, uj;
    // #pragma omp for schedule(static)
    const int16_t *w_tap = W + ((a*nif) + b)*fs*fs;   /* filter for (a,b) */
    const int16_t *x_map = x + b*h*w;                 /* input map b */
    int16_t *y_map = y + a*oh*ow;                     /* output map a */
    for (i=0; i<oh; i++) {
        for (j=0; j<ow; j++) {
            int16_t acc = 0;
            for (ui=0; ui<fs; ui++) {
                for (uj=0; uj<fs; uj++) {
                    int m = i - ui + fs - 1;
                    int n = j - uj + fs - 1;
                    acc += ccn_mult(w_tap[ui*fs + uj], x_map[m*w + n]);
                }
            }
            y_map[i*ow + j] += acc;
        }
    }
}
/**
* @brief Computes the 2d convolution over all feature maps.
*
* For each output feature map, loops over input feature maps, computes the
* convolutions and sums them as per the definition of 2d convolution of a 4d
* weight tensor by a 2d input tensor.
*
* @param *W
* a pointer to the base of the 4d weight tensor. Weights are organized
* in a W[a,b,i,j] fashion, where a is the index of the output feature
* map, b is the index of the input feature map, and (i,j) are the
* coordinates of a pixel in the filter. Data layout on memory is
* row-major. The __restrict__ keyword is used to indicate the compiler
* that W, x, y do not overlap.
* @param *x
* a pointer to the base of the input 3d tensor (a collection of feature
* maps). The inputs are organized in a x[b,i,j] fashion, where b is the
* index of the input feature map, and (i,j) are the coordinates of a
* pixel in the input feature map. The __restrict__ keyword is used to
* indicate the compiler that W, x, y do not overlap.
* @param *y
* a pointer to the base of the output 3d tensor (a collection of feature
* maps. The outputs are organized in a y[a,i,j] fashion, where a is the
* index of the output feature map, and (i,j) are the coordinates of a
* pixel in the output feature map. The __restrict__ keyword is used to
* indicate the compiler that W, x, y do not overlap.
* @param h
* the height of the input feature maps.
* @param w
* the width of the input feature maps.
* @param fs
* the size of the filters.
* @param a
* the index of output feature maps.
* @param nif
* the number of input feature maps.
* @param parallel_type
* the type of parallelization.
*/
#ifdef CCN_HWCE_ACCEL
/*
 * Offload one output feature map to the HWCE accelerator. Filter sizes
 * other than 5x5 are built from multiple 5x5 accelerator passes, which is
 * why the per-(a,b) weight stride grows for 7x7 (4 chunks) and 11x11
 * (9 chunks); each chunk is 28 halfwords. The final pass is asynchronous;
 * we block on its job id before returning. parallel_type is unused here.
 */
void linalg_2dconv_hwce(
    data_t *__restrict__ W,
    data_t *__restrict__ x,
    data_t *__restrict__ y,
    int h,
    int w,
    int fs,
    int a,
    int nif,
    int parallel_type,
    unsigned qf
) {
    const int oh = h-fs+1;
    const int ow = w-fs+1;
    /* b fixed at 0: nif is forwarded to the accelerator call, which
       presumably iterates the input maps internally — confirm in hwce.h */
    const int b = 0;
    int16_t *out = y + a*oh*ow;
    int16_t *in  = x + b*h*w;
    int16_t *wgt;
    int job = -1;
    switch(fs) {
        case 3:
            wgt = W + a*nif*28 + b*28;
            job = conv16_hwce_3x3_async(wgt, in, out, h, w, nif, 0, qf);
            break;
        case 5:
            wgt = W + a*nif*28 + b*28;
            job = conv16_hwce_5x5_async(wgt, in, out, h, w, nif, 0, qf);
            break;
        case 7:
            wgt = W + a*nif*4*28 + b*4*28;
            job = conv16_hwce_7x7_async(wgt, in, out, h, w, nif, 0, qf);
            break;
        case 11:
            wgt = W + a*nif*9*28 + b*9*28;
            job = conv16_hwce_11x11_async(wgt, in, out, h, w, nif, 0, qf);
            break;
        default:
            /* unsupported filter size: job stays -1 */
            break;
    }
    hwce_wait(job);
}
#endif /* CCN_HWCE_ACCEL */
/*
 * Software 2D convolution of all nif input maps into output map a,
 * dispatching each map to conv16_approx (CONV_APPROX builds) or
 * conv16_unrolled_ptr. x_base is advanced to the bottom-right corner of
 * the first filter window because the inner kernels read backwards
 * (flipped-kernel indexing). On chips with the padded weight layout the
 * per-map weight stride is MULTIPLE4(fs*fs) instead of fs*fs.
 *
 * Cleanup: the PARALLEL_FEAT and default branches were byte-identical
 * (their OpenMP pragmas are commented out), so they are merged into a
 * single loop; parallel_type is still forwarded to the inner kernel,
 * which implements pixel-level work sharing. Unused local `ri` removed.
 */
void linalg_2dconv(
    data_t *__restrict__ W,
    data_t *__restrict__ x,
    data_t *__restrict__ y,
    int h,
    int w,
    int fs,
    int a,
    int nif,
    int parallel_type,
    unsigned qf
) {
    int oh = h-fs+1;
    int ow = w-fs+1;
    int b;
    // #pragma omp parallel / #pragma omp for nowait (currently disabled)
    for (b=0; b<nif; b++) {
        int16_t *y_ptr = y + a*oh*ow;
        int16_t *x_base = x + b*h*w + (fs-1)*w + (fs-1);
#if PULP_CHIP == CHIP_MIA || PULP_CHIP == CHIP_PULP3 || PULP_CHIP == CHIP_FULMINE || PULP_CHIP == CHIP_HONEY
        int16_t *W_base = W + a*nif*MULTIPLE4(fs*fs) + b*MULTIPLE4(fs*fs);
#else /* ~CHIP_MIA && ~CHIP_PULP3 */
        int16_t *W_base = W + a*nif*fs*fs + b*fs*fs;
#endif /* ~CHIP_MIA && ~CHIP_PULP3 */
#ifdef CONV_APPROX
        conv16_approx(W_base, x_base, y_ptr, w, oh, ow, fs, parallel_type);
#else
        conv16_unrolled_ptr(W_base, x_base, y_ptr, w, oh, ow, fs, parallel_type, qf);
#endif
    }
}
/*
 * linalg_mvprod: fixed-point matrix-by-vector product, y += A^T x with qf
 * fractional bits. A is laid out so that column n of the product reads
 * A[m*N+n] for m in [0,M) (column-major from C's row-major perspective,
 * due to a PyConvNet constraint). Each output element is seeded with the
 * current y[n] scaled up by qf, accumulated at full int precision, then
 * rescaled and saturated to int16 unless DONT_SATURATE is defined.
 *
 * NOTE(review): parameter b is never used — presumably a bias vector;
 * confirm intent. Kept for interface compatibility.
 */
void linalg_mvprod(data_t *__restrict__ A, data_t *__restrict b, data_t *__restrict__ x, data_t *__restrict__ y, int M, int N, unsigned qf) {
    #pragma omp parallel for
    for(int n=0; n<N; n++) {
        /* seed with the existing output, promoted to full precision */
        int acc = y[n] << qf;
        int m;
        for(m=0; m<M; m++) {
            acc += A[m*N+n] * x[m];
        }
#ifndef DONT_SATURATE
        acc >>= qf;
        /* clamp to the int16 range */
        if(acc > 32767) {
            acc = 32767;
        }
        else if(acc < -32768) {
            acc = -32768;
        }
#endif /* DONT_SATURATE */
        y[n] = acc;
    }
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <cstring>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "./base.h"
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
// forward declare learner.
class LearnerImpl;
/*! \brief Data type accepted by the xgboost interface when copying raw
 *  buffers in (see MetaInfo::SetInfo). */
enum DataType {
  kFloat32 = 1,  // 32-bit floating point
  kDouble = 2,   // 64-bit floating point
  kUInt32 = 3,   // 32-bit unsigned integer
  kUInt64 = 4    // 64-bit unsigned integer
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of rows in the data */
uint64_t num_row_{0};
/*! \brief number of columns in the data */
uint64_t num_col_{0};
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
/*!
* \brief specified root index of each instance,
* can be used for multi task setting
*/
std::vector<bst_uint> root_index_;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_uint> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
/*! \brief version flag, used to check version of this info */
static const int kVersion = 3;
/*! \brief version that contains qid field */
static const int kVersionWithQid = 2;
/*! \brief default constructor */
MetaInfo() = default;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*!
* \brief Get the root index of i-th instance.
* \param i Instance index.
* \return The pre-defined root index of i-th instance.
*/
inline unsigned GetRoot(size_t i) const {
return root_index_.size() != 0 ? root_index_[i] : 0U;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
private:
/*! \brief argsort of labels */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_uint index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset of the start of each row within data; always has Size()+1
  // entries (Clear() keeps a leading 0).
  HostDeviceVector<size_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  /*! \brief global index of the first row stored in this page; added to
   *  local row ids in GetTranspose */
  size_t base_rowid;
  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;
  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i],
            static_cast<Inst::index_type>(size)};
  }
  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }
  /*! \return number of instances in the page */
  inline size_t Size() const {
    return offset.Size() - 1;
  }
  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }
  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    // CSR offset keeps a leading 0 so that Size() == offset.Size() - 1
    offset_vec.push_back(0);
    data.HostVector().clear();
  }
  /*! \brief Transpose this page (rows become columns) with a two-pass
   *  parallel group builder. */
  SparsePage GetTranspose(int num_columns) const {
    SparsePage transpose;
    common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
                                                &transpose.data.HostVector());
    const int nthread = omp_get_max_threads();
    builder.InitBudget(num_columns, nthread);
    long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
    // pass 1: count how many entries land in each output group
#pragma omp parallel for schedule(static)
    for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
      int tid = omp_get_thread_num();
      auto inst = (*this)[i];
      for (bst_uint j = 0; j < inst.size(); ++j) {
        builder.AddBudget(inst[j].index, tid);
      }
    }
    builder.InitStorage();
    // pass 2: scatter entries; the stored index is the global row id
#pragma omp parallel for schedule(static)
    for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
      int tid = omp_get_thread_num();
      auto inst = (*this)[i];
      for (bst_uint j = 0; j < inst.size(); ++j) {
        builder.Push(
            inst[j].index,
            Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
            tid);
      }
    }
    return transpose;
  }
  /*! \brief Sort the entries of every segment by feature value
   *  (Entry::CmpValue); empty segments are skipped. */
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(
            this->data.HostVector().begin() + this->offset.HostVector()[i],
            this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
            Entry::CmpValue);
      }
    }
  }
  /*!
   * \brief Push row block into the page.
   * \param batch the row batch.
   */
  void Push(const dmlc::RowBlock<uint32_t>& batch);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
  /*!
   * \brief Push one instance into page
   * \param inst an instance row
   */
  void Push(const Inst &inst);
  // NOTE(review): non-const duplicate of Size() above; presumably kept for
  // callers holding a non-const page — consider removing.
  size_t Size() { return offset.Size() - 1; }
};
/*! \brief A sparse page holding data in CSC (column-major) layout. */
class CSCPage: public SparsePage {
 public:
  CSCPage() = default;
  /*! \brief take over the storage of an existing page */
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*! \brief A CSC-layout sparse page whose segments are kept sorted. */
class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() = default;
  /*! \brief take over the storage of an existing page */
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*! \brief Interface implemented by concrete batch iterators. */
template<typename T>
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() = default;
  /*! \brief access the current batch */
  virtual T& operator*() = 0;
  virtual const T& operator*() const = 0;
  /*! \brief advance to the next batch */
  virtual void operator++() = 0;
  /*! \brief whether iteration is exhausted */
  virtual bool AtEnd() const = 0;
};
/*! \brief Forward iterator over batches; shares ownership of its
 *  implementation object. */
template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  /*! \brief Wrap an implementation; the iterator takes ownership of impl. */
  explicit BatchIterator(BatchIteratorImpl<T>* impl) : impl_(impl) {}
  void operator++() {
    CHECK(impl_ != nullptr);
    impl_->operator++();
  }
  T& operator*() {
    CHECK(impl_ != nullptr);
    return impl_->operator*();
  }
  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return impl_->operator*();
  }
  /*! \brief Sentinel-style comparison: the right-hand side is ignored;
   *  iteration continues until the implementation reports AtEnd(). */
  bool operator!=(const BatchIterator& rhs) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }
  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

 private:
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
/*! \brief A range of batches, usable in range-based for loops; end() is a
 *  sentinel iterator with a null implementation. */
template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
  /*! \brief iterator positioned at the first batch */
  BatchIterator<T> begin() { return begin_iter_; }
  /*! \brief sentinel end iterator */
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }

 private:
  BatchIterator<T> begin_iter_;
};
/*!
 * \brief This is a data structure that users can pass to DMatrix::Create
 * to create a DMatrix for training; users can create this data structure
 * for customized data loading on a single machine.
 *
 * In a distributed setting, a customized dmlc::Parser is usually needed instead.
 */
template<typename T>
class DataSource : public dmlc::DataIter<T> {
 public:
  /*!
   * \brief Meta information about the dataset.
   * The subclass needs to be able to load this correctly from data.
   */
  MetaInfo info;
};
/*!
 * \brief Internal data structure used by XGBoost during training.
 * There are two ways to create a customized DMatrix that reads in a user-defined format.
 *
 * - Provide a dmlc::Parser and pass it into DMatrix::Create
 * - Alternatively, if the data can be represented by a URL, define a new dmlc::Parser and register it by DMLC_REGISTER_DATA_PARSER;
 * - This works best for user-defined data input sources, such as databases or filesystems.
 * - Provide a DataSource, which can be passed to DMatrix::Create
 * This can be used to re-use an in-memory data structure as a DMatrix.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief meta information of the dataset (read-only) */
  virtual const MetaInfo& Info() const = 0;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   */
  template<typename T>
  BatchSet<T> GetBatches();
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns form a single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief get column density */
  virtual float GetColDensity(size_t cidx) = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix() = default;
  /*!
   * \brief Save DMatrix to a local file.
   * The saved file only works for a non-sharded dataset (single machine training).
   * This API is deprecated and its use is discouraged.
   * \param fname The file name to save to.
   */
  virtual void SaveToLocalFile(const std::string& fname);
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether to print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   * By default "auto" detects the format, and can also load the local binary format.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto",
                       const size_t page_size = kPageSize);
  /*!
   * \brief create a new DMatrix, by wrapping a row_iterator, and meta info.
   * \param source The source iterator of the data, the create function takes ownership of the source.
   * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
   *     This can be nullptr for common cases, and in-memory mode will be used.
   * \return a Created DMatrix.
   */
  static DMatrix* Create(std::unique_ptr<DataSource<SparsePage>>&& source,
                         const std::string& cache_prefix = "");
  /*!
   * \brief Create a DMatrix by loading data from a parser.
   * The parser can be deleted after the DMatrix is created.
   * \param parser The input data parser
   * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
   *     This can be nullptr for common cases, and in-memory mode will be used.
   * \param page_size Page size for external memory.
   * \sa dmlc::Parser
   * \note dmlc-core provides an efficient distributed data parser for libsvm format.
   *   Users can create and register a customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
   *   See "dmlc-core/include/dmlc/data.h" for detail.
   * \return A created DMatrix.
   */
  static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
                         const std::string& cache_prefix = "",
                         const size_t page_size = kPageSize);
  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;

 protected:
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
};
/*! \brief Row-major (CSR) batches. */
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches() {
  return GetRowBatches();
}
/*! \brief Column-major (CSC) batches. */
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches() {
  return GetColumnBatches();
}
/*! \brief Column-major batches with sorted segments (see SortRows). */
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches() {
  return GetSortedColumnBatches();
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif // XGBOOST_DATA_H_
|
omp_ex_15.c | #include <stdio.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
 * Demonstrates the OpenMP firstprivate clause: every thread enters the
 * parallel region with its own copy of `a`, initialized from the master
 * thread's value (90).  Each thread mutates only its private copy, so the
 * original `a` is unchanged after the region.
 */
int main()
{
    unsigned int a = 90;

    printf("Before a = %i\n", a);
    #pragma omp parallel firstprivate(a)
    {
        int tid = omp_get_thread_num();   /* this thread's id */
        a = a + 10 + tid;                 /* updates the private copy only */
        printf("Inside a = %i\n", a);
    }
    /* Still prints 90: firstprivate copies are discarded at region exit. */
    printf("After a = %i\n", a);
    return 0;
}
|
privatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// tmp should be put as private to avoid race condition
#include <stdlib.h>
#include <stdio.h>
/*
 * Initializes a[i] = i, then adds the loop index to each element in
 * parallel, so a[i] becomes 2*i (a[50] prints 100).
 *
 * Fix: the original declared a single `tmp` shared across all threads of
 * the `#pragma omp parallel for`, creating a data race (one thread could
 * overwrite `tmp` between another thread's write and read of it).  The
 * file's own note says tmp must be private.  Declaring `tmp` inside the
 * loop body gives every iteration its own copy, which is equivalent to
 * adding a private(tmp) clause.
 */
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[100];

  for (i=0;i<len;i++)
    a[i]=i;

#pragma omp parallel for
  for (i=0;i<len;i++)
  {
    int tmp = a[i]+i;  /* block-scoped => private per iteration: no race */
    a[i] = tmp;
  }

  printf("a[50]=%d\n", a[50]);  /* expected: 100 */
  return 0;
}
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
/*
  CompareImages() computes the distortion between `image` and
  `reconstruct_image` using `metric` (stored through `distortion`), then
  builds and returns a visual "difference" image: pixels whose squared
  per-channel difference reaches the fuzz threshold are painted with the
  highlight color, matching pixels with the lowlight color, and masked
  pixels with the masklight color.  The highlight layer is composited onto
  an extent of the original image.  Returns NULL on any failure; the caller
  owns the returned image.
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compute the requested numeric distortion first; bail out on failure.
  */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    Work over the union of the two geometries so a size mismatch still
    produces a full difference image.
  */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Default colors, each overridable via an image artifact
    (e.g. -define compare:highlight-color=...).
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    Quantum
      *magick_restrict r;

    ssize_t
      x;

    /* A prior row's failure aborts remaining rows cooperatively. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      ssize_t
        i;

      /* Masked pixels (read mask at most half intensity) get masklight. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      /* Alpha-weight color channels so transparency damps differences. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        /* One channel past the fuzz threshold marks the whole pixel. */
        if (distance >= fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Merge the highlight layer onto the extended clone of the original. */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion() counts, per channel, the pixels whose alpha-weighted
  squared channel difference reaches the fuzz threshold; the count of pixels
  differing in any channel is accumulated in distortion[CompositePixelChannel].
  `distortion` must hold MaxPixelChannels+1 doubles and is added to, not
  reset, here.  Returns MagickFalse if any row of pixels cannot be read.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row partial sums; folded into `distortion` under a critical
       section below to avoid a data race between rows. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      ssize_t
        i;

      /* Skip pixels excluded by a read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      /* A pixel counts once in the composite tally no matter how many of
         its channels differ. */
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() accumulates the per-channel mean of the squared,
  QuantumScale-normalized, alpha-weighted channel differences over all
  unmasked pixels.  The composite entry is the square root of the average
  over the updatable channels.  Returns MagickFalse if any row of pixels
  cannot be read.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row partial sums, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Masked pixels are excluded from both the sums and the area. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      /* `area` counts unmasked pixels; combined across threads via the
         OpenMP reduction clause. */
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal() guards against division by zero when every
     pixel was masked (area == 0). */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() accumulates the per-channel mean of the
  absolute, QuantumScale-normalized, alpha-weighted channel differences over
  all unmasked pixels; the composite entry is the average over the updatable
  channels.  Returns MagickFalse if any row of pixels cannot be read.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row partial sums, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Masked pixels are excluded from both the sums and the area. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by the unmasked-pixel count (safe reciprocal for area == 0). */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetMeanErrorPerPixel() accumulates the absolute, alpha-weighted channel
  differences into `distortion`, tracks the squared-error sum and the maximum
  single-channel error, and stores the derived statistics in image->error.
  Runs serially (no OpenMP here, unlike the sibling metrics).
  Returns MagickFalse if any row of pixels cannot be read.
  NOTE(review): `area` is used as a direct divisor without
  PerceptibleReciprocal(); if every pixel is masked, area stays 0 and the
  divisions below produce inf/NaN — confirm whether callers guard this.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by a read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Publish the aggregate error statistics on the image itself. */
  image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area;
  image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() accumulates the per-channel mean of the squared,
  QuantumScale-normalized, alpha-weighted channel differences over all
  unmasked pixels; the composite entry is the average over the updatable
  channels (no square root — compare GetFuzzDistortion()).  Returns
  MagickFalse if any row of pixels cannot be read.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row partial sums, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Masked pixels are excluded from both the sums and the area. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by the unmasked-pixel count (safe reciprocal for area == 0). */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes, per channel, the
  normalized cross correlation between the two images: the mean-centered
  covariance (using GetImageStatistics() means), scaled by QuantumRange and
  divided by the product of the two channels' standard deviations.  The
  composite entry is the RMS of the per-channel values.  Two serial passes:
  the first counts unmasked pixels (the area), the second accumulates the
  covariance.  Returns MagickFalse on statistics failure, unreadable pixels,
  or a canceled progress monitor.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* Release whichever statistics buffer was acquired before failing. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    Pass 1: count the unmasked pixels so the covariance can be averaged.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  /* Safe reciprocal: guards against area == 0 (all pixels masked). */
  area=PerceptibleReciprocal(area);
  /*
    Pass 2: accumulate the mean-centered, alpha-weighted covariance.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): this loop is serial here, so the atomic appears
           vestigial — presumably kept for symmetry with parallel siblings;
           harmless either way. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() records, per channel, the maximum absolute,
  QuantumScale-normalized, alpha-weighted channel difference over all
  unmasked pixels; distortion[CompositePixelChannel] holds the maximum over
  every channel.  Entries in `distortion` are only raised, never lowered.
  Returns MagickFalse if any row of pixels cannot be read.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row maxima; folded into `distortion` under the critical
       section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by a read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() converts each channel's mean squared error into
  a peak signal-to-noise ratio in decibels; a vanishing error maps to
  +infinity.  The peak value is 1.0 because the MSE is computed on
  QuantumScale-normalized samples.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    double
      mean_squared_error;

    mean_squared_error=distortion[channel];
    if (fabs(mean_squared_error) >= MagickEpsilon)
      distortion[channel]=10.0*MagickLog10(1.0)-10.0*
        MagickLog10(mean_squared_error);
    else
      distortion[channel]=INFINITY;
  }
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual-hash image moments of
  the two images and accumulates the squared moment differences per channel
  into distortion[], with the sum over channels in
  distortion[CompositePixelChannel].  Returns MagickFalse only when a
  perceptual hash could not be computed.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;
  const char
    *artifact;
  MagickBooleanType
    normalize;
  ssize_t
    channel;
  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  /* "phash:normalize" artifact selects normalized (per-channel-count) mode */
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;
    ssize_t
      i;
    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;
      ssize_t
        j;
      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          /* NOTE(review): this branch assigns rather than accumulates, so
             only the last moment/colorspace pair survives when normalizing;
             confirm against upstream whether `+=` was intended. */
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /* distortion[channel] is touched by exactly one thread (indexed by the
       parallel loop variable), so no critical section is needed here */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() reports the root-mean-squared error: the
  square root of each channel's mean squared distortion, including the
  composite channel slot.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the structural similarity
  index (SSIM, https://en.wikipedia.org/wiki/Structural_similarity) between
  the two images, per channel, using a Gaussian weighting window.  The
  radius/sigma and the K1/K2 stabilizing constants are tunable via the
  "compare:ssim-*" image artifacts.  Mean SSIM per channel is left in
  distortion[], the channel-averaged value in
  distortion[CompositePixelChannel].

  Fixes relative to the previous revision:
    - removed a redundant memset of x_pixel_sigma_squared that used
      sizeof(y_pixel_sigma_squared) (copy-paste slip; the y arrays are
      already cleared just below);
    - `area` is now accumulated per row and folded into the shared total
      inside the critical section, removing a data race on `area++`.
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0
  CacheView
    *image_view,
    *reconstruct_view;
  char
    geometry[MagickPathExtent];
  const char
    *artifact;
  double
    area,
    c1,
    c2,
    radius,
    sigma;
  KernelInfo
    *kernel_info;
  MagickBooleanType
    status;
  ssize_t
    i;
  size_t
    columns,
    rows;
  ssize_t
    y;
  /*
    Build the Gaussian weighting kernel from the (artifact-tunable) radius
    and sigma.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* c1/c2 stabilize the division when means/variances are near zero */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  area=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1],
      row_area;
    const Quantum
      *magick_restrict p,
      *magick_restrict q;
    ssize_t
      i,
      x;
    if (status == MagickFalse)
      continue;
    /* fetch a window-sized band centered on row y from both images */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    row_area=0.0;
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];
      const Quantum
        *magick_restrict reference,
        *magick_restrict target;
      MagickRealType
        *k;
      ssize_t
        v;
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      /* accumulate Gaussian-weighted means, variances and covariance over
         the kernel window */
      k=kernel_info->values;
      reference=p;
      target=q;
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        ssize_t
          u;
        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        /* advance to the next kernel row within the fetched band */
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        /* canonical SSIM formula with c1/c2 stabilizers */
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
        row_area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* fold this row's sums and sample count into the shared totals */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    {
      for (i=0; i <= MaxPixelChannels; i++)
        distortion[i]+=channel_distortion[i];
      area+=row_area;
    }
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=area;
  }
  distortion[CompositePixelChannel]/=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
/*
  GetStructuralDisimilarityDistortion() derives the structural dissimilarity
  (DSSIM) from SSIM per channel as (1-SSIM)/2, so identical images score 0.
*/
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=(1.0-(distortion[channel]))/2.0;
  return(status);
}
/*
  GetImageDistortion() compares the image to the reconstruct image using the
  requested metric, stores the composite-channel distortion in *distortion
  (also recorded as the "distortion" image property), and returns MagickTrue
  on success.  Unknown metrics fall back to normalized cross correlation.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;
  MagickBooleanType
    status;
  size_t
    length;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  /* one slot per pixel channel plus the composite slot */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  /* dispatch to the metric-specific helper; each fills channel_distortion[] */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions() compares the pixel channels of the image to the
  reconstruct image and returns a freshly allocated array of per-channel
  distortions (MaxPixelChannels+1 entries, composite last), or NULL on
  failure.  The caller owns the returned array and must free it with
  RelinquishMagickMemory().

  Fix: the PerceptualHashErrorMetric case previously called
  GetRootMeanSquaredDistortion() (copy-paste from the case below); it now
  calls GetPerceptualHashDistortion(), matching GetImageDistortion().
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;
  MagickBooleanType
    status;
  size_t
    length;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /* was GetRootMeanSquaredDistortion(); see function comment */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual() walks both images pixel by pixel and returns MagickTrue
  only if every compared channel differs by less than MagickEpsilon; it
  short-circuits on the first mismatch.  The nested breaks propagate outward
  by testing whether each loop variable reached its bound.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  size_t
    columns,
    rows;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /* compare over the union of both geometries */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;
    ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          break;
      }
      /* channel loop broke early => mismatch at this pixel */
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* column loop broke early => mismatch in this row */
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* y reached rows only if no mismatch (and no read failure) occurred */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value of 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColorMetric() measures the per-pixel color difference between the
  two images and records the mean error per pixel, the normalized mean
  error, and the normalized maximum error in image->error.  Returns
  MagickTrue when the images match exactly (zero mean error per pixel).

  Fix: `area` is zero when no channel is ever compared (e.g. all traits
  undefined), which previously produced NaN/Inf via division by zero; the
  divisions now go through PerceptibleReciprocal(), as elsewhere in this
  file.
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;
  MagickBooleanType
    status;
  size_t
    columns,
    rows;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;
    ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        /* only differences at or above MagickEpsilon count as error */
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* guard against area == 0 (no channels compared): avoid divide-by-zero */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel*area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error*area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() measures how closely the region of image anchored at
  (x_offset,y_offset) matches the reference tile: the region is cropped and
  its distortion against the reference is computed with the requested
  metric.  A failed crop or a failed distortion computation yields 0.0.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    crop_geometry;

  SetGeometry(reference,&crop_geometry);
  crop_geometry.x=x_offset;
  crop_geometry.y=y_offset;
  crop_image=CropImage(image,&crop_geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop_image,reference,metric,&distortion,
    exception);
  crop_image=DestroyImage(crop_image);
  return(status == MagickFalse ? 0.0 : distortion);
}
/*
  SimilarityImage() slides the reference image over the image and, for each
  offset, computes the distortion of the cropped region versus the
  reference.  It returns a similarity map (white = best match) and reports
  the best offset and its distortion through *offset / *similarity_metric.
  The search short-circuits once a match at or below similarity_threshold is
  found.

  Fix: the OpenMP critical section previously covered only the first `if`
  statement; the compound update of offset->x/y and *similarity_metric ran
  unprotected, racing between threads and potentially recording a torn
  best-match.  The whole test-and-update is now a single structured block
  under the critical.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
  CacheView
    *similarity_view;
  Image
    *similarity_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /* one similarity sample per valid anchor position of the reference */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    /* another thread already found a good-enough match: stop searching */
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      ssize_t
        i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* test-and-update of the shared best match must be atomic as a unit */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      {
        if ((metric == NormalizedCrossCorrelationErrorMetric) ||
            (metric == UndefinedErrorMetric))
          similarity=1.0-similarity;
        if (similarity < *similarity_metric)
          {
            offset->x=x;
            offset->y=y;
            *similarity_metric=similarity;
          }
      }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        /* invert: small distortion -> bright similarity pixel */
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
World.h | #ifndef __WORLD__H__
#define __WORLD__H__
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseCholesky>
#include <vector>
#include <set>
#include <Eigen/Dense>
#include "Constraint/ConstraintHeader.h"
#include "../utils/collisionBrutal.h"
#include "../constants.h"
namespace FEM
{
// class Constraint;
/// Projective-dynamics simulation world.
/// Owns the soft-body vertices, their constraints, embedded articulated rigid
/// links and the collision structures, and advances the coupled system one
/// step at a time via TimeStepping().
template <typename TinyScalar, typename TinyConstants>
class World
{
public:
/// @param time_step               integration step h (seconds)
/// @param max_iteration           cap on projective-dynamics iterations
/// @param damping_coeff           velocity damping factor applied each step
/// @param is_precessing_collision enable collision processing
World(
TinyScalar time_step = 1.0/100.0,
int max_iteration = 30,
TinyScalar damping_coeff = 0.999,
bool is_precessing_collision = false
);
~World();
/// Builds mass matrices, collision meshes/detector and the prefactorized
/// system; must be called once after all bodies are added (AddBody
/// re-invokes it automatically when called after initialization).
void Initialize();
/// Restores positions/velocities snapshotted by Initialize().
void Reset();
/// Adds a deformable (cloth) body; ownership of the constraints in `c`
/// transfers to the world (they are deleted in ~World()).
void AddBody(
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x0,
const std::vector<Constraint<TinyScalar, TinyConstants>*>& c,
const std::vector<TinyScalar>& masses,
const std::vector<int>& contactIdx,
const std::vector<Eigen::Vector3i>& contactFace);
/// Adds a soft body with embedded rigid parts driven by articulated links.
void AddBody(
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x0,
const std::vector<Constraint<TinyScalar, TinyConstants>*>& c,
const std::vector<TinyScalar>& masses,
const std::vector<int>& contactIdx,
const std::vector<Eigen::Vector3i>& contactFace,
const std::vector<std::vector<int>> &rigidBodyIndices,
const std::vector<int> &nonrigid,
const std::vector<int> &rigid,
const std::vector<Link<TinyScalar, TinyConstants>*> &links);
/// Adds a static obstacle mesh (collision-only; no degrees of freedom).
void AddBody(
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x0,
const std::vector<int>& contactIdx,
const std::vector<Eigen::Vector3i>& contactFace);
void AddConstraint(Constraint<TinyScalar, TinyConstants>* c);
void RemoveConstraint(Constraint<TinyScalar, TinyConstants>* c);
/// Advances the simulation one step; when isIntegrated, mTime/mFrame advance.
void TimeStepping(bool isIntegrated = true);
// void TimeSteppingRigid(bool isIntegrated = true);
/// Commits the solver result: mV = (x_n1 - mX)/h, mX = x_n1.
void UpdatePositionsAndVelocities(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x_n1);
/// Runs the local/global projective-dynamics solve (plus collision loop).
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> ProjectiveDynamicsMethod();
/// Assembles and factorizes the constant system matrices (J, L, A, LDLT).
void PreComputation();
void EvaluateJMatrix(Eigen::SparseMatrix<TinyScalar>& J);
void EvaluateLMatrix(Eigen::SparseMatrix<TinyScalar>& L);
void EvaluateAMatrix();
/// Local step: projects x onto each constraint, writing the stacked result into d.
void EvaluateDVector(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x,Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& d);
void FactorizeLDLT(const Eigen::SparseMatrix<TinyScalar>& A,Eigen::SimplicialLDLT<Eigen::SparseMatrix<TinyScalar>>& ldltSolver);
void SetExternalForce(Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> external_force);
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& GetExternalForce() {return mExternalForces;};
const TinyScalar& GetTimeStep(){return mTimeStep;};
const TinyScalar& GetTime(){return mTime;};
const std::vector<Constraint<TinyScalar, TinyConstants>*>& GetConstraints(){return mConstraints;};
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& GetPositions(){return mX;};
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& GetVelocities(){return mV;};
const int& GetNumVertices(){return mNumVertices;};
void do_solve_friction(Eigen::Matrix<TinyScalar, 3, 1> &p0, TinyScalar m0, Eigen::Matrix<TinyScalar, 3, 1> &r0, std::vector<Eigen::Matrix<TinyScalar, 3, 1>> &ps, std::vector<TinyScalar> &ms, std::vector<Eigen::Matrix<TinyScalar, 3, 1>> &rs, std::vector<bool> &stick, TinyScalar mu);
// private:
// --- Core simulation state -------------------------------------------------
bool mIsInitialized;
bool mIsCollision;
std::vector<Constraint<TinyScalar, TinyConstants>*> mConstraints;
int mConstraintDofs;
int mNumVertices;
TinyScalar mTimeStep;
TinyScalar mTime;
int mFrame;
int mMaxIteration, mMaxIteration1;
TinyScalar mDampingCoefficient;
// mX/mV are stacked 3*mNumVertices position/velocity vectors.
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mX,mV, mNonrigidX,mJointQ, mJointQd;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mInitX,mInitV;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mExternalForces;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mQn; // (qt + hvt + h^2M^-1fext)
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> m_rhs_speed;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mJointTorque;
// Per-vertex lumped masses; expanded to diagonal 3x3 blocks in Initialize().
std::vector<TinyScalar> mUnitMass;
Eigen::SparseMatrix<TinyScalar> mMassMatrix;
Eigen::SparseMatrix<TinyScalar> mInvMassMatrix;
Eigen::SparseMatrix<TinyScalar> mIdentityMatrix;
Eigen::SparseMatrix<TinyScalar> mJ,mL;
Eigen::SparseMatrix<TinyScalar> m_A;
Eigen::SparseMatrix<TinyScalar> m_ATA;
Eigen::SparseMatrix<TinyScalar> mMh2L;
Eigen::SimplicialLDLT<Eigen::SparseMatrix<TinyScalar>>
mDynamicSolver,mQuasiStaticSolver,mDynamicSolverM;
// --- Collision buffers (compact contact-vertex indexing) -------------------
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mV_without_collision;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mCollision_V_next;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mCollision_X_next;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mPosition_current_next;
TinyScalar self_collision_tolerance = 0.01; // TODO
TinyScalar m_self_collision_tol2;
bool m_handle_self_collision = true;
// Indices into the double-buffered force accumulators below.
int m_current_index;
int m_next_index;
/// @brief (IO) per-step timing log
std::vector<TinyScalar> m_rhs_base_times;
/// @brief (IO) per-step timing log
std::vector<TinyScalar> m_friction_times;
/// @brief (IO) per-step timing log
std::vector<TinyScalar> m_self_friction_times;
std::vector<TinyScalar> m_self_collision_ordering_times;
using ForceType = Eigen::Matrix<TinyScalar, 3, -1, Eigen::RowMajor>;
/// @brief Storage needed to handle self friction
ForceType m_contact_forces[2];
/// @brief Storage needed to handle self friction
ForceType m_self_contact_forces[2];
/// @brief Storage needed to handle self friction
ForceType m_self_contact_repercusion_forces[2];
/// @brief Storage needed to handle self friction
Eigen::Matrix<TinyScalar, 3, Eigen::Dynamic> m_alpha[2];
struct SelfCollisionGraphNeighboor
{
/**
* The index of vertex adjacent through this edge.
*/
std::size_t index;
/**
* The index of the self-collision represented by this index.
*/
std::size_t collision_index;
};
/**
* An adjacency list representing the self-collision graph. The vertices of
* this graph are the vertices of the mesh. The edges are the self-collision
* between the vertices.
* @see SelfCollisionGraphNeighboor
* @see computeSelfCollisionGraph
*/
int mNumContactVertices;
// Maps a global vertex id to its compact contact-vertex index; kept in
// lockstep with mContactIdx by the AddBody overloads.
std::map<int, int> mMapAll2Contact;
std::vector<Eigen::Vector3i> mContactFace, mObsFace;
std::vector<int> mContactIdx;
CollisionMesh<TinyScalar, TinyConstants>* mCollisionMesh;
// Left for children's equations
// TODO : make it clean
/// @brief Right hand side of the global step equation
Eigen::Matrix<TinyScalar, 3, -1, Eigen::RowMajor> m_rhs;
std::vector<TinyScalar> m_masses;
CollisionBrutal<TinyScalar, TinyConstants>* mCollisionDetector;
void computeCollisionComputationOrder() noexcept;
std::vector<int> CollisionHandling();
TinyScalar getVertexMass(size_t vertex_index) const;
void updateCollisionScene() noexcept;
void convertCollisionIndexBack();
void convertCollisionIndex();
void updateCollisionInIter(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& v, const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x);
void ComputeV0NTr();
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> ComputeOriX(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &x_n1);
void ComputeSolution(
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &deltar,
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &ori_c,
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &hvf,
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &s);
// --- Rigid/articulated bookkeeping ----------------------------------------
std::vector<std::vector<int>> mrigidBodies;
int mDof, mStoreDof;
TinyScalar mAddTorque, mAlpha, mFriction;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> QbMmQcLQfm1QcLT, QbMmQcLMfm1QcLT;
// Partitioned system blocks (free vs. rigid dofs), built in PreComputation().
Eigen::SparseMatrix<TinyScalar> mH2ML, Qf, Qc, Qb, Qc_L, Qb_M, mLf, h2Mf, mC;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> mcf, mcs, V0;
std::vector<int> mnonrigidIdx, mrigidIdx, mrigidSizes;
std::vector<Transformation<TinyScalar, TinyConstants> > T, Tr;
std::vector<Link<TinyScalar, TinyConstants>*> mlinks;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> v_n1;
std::vector<Eigen::Matrix<TinyScalar, 3, 1> > mCollisionMeshPositions, mObsPositions;
ArcsimMesh *mMesh, *mObsMesh;
// Permutation ordering non-rigid dofs first, then rigid dofs.
Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic> mperm;
};
#define EPS 5e-7
// Constructs an empty world; bodies/constraints are added afterwards via
// AddBody()/AddConstraint(), then Initialize() must be called once.
// @param time_step               integration step h (seconds)
// @param max_iteration           projective-dynamics iteration cap
// @param damping_coeff           per-step velocity damping factor
// @param is_precessing_collision enable collision processing
template <typename TinyScalar, typename TinyConstants>
World<TinyScalar, TinyConstants>::
World(
	TinyScalar time_step,
	int max_iteration,
	TinyScalar damping_coeff,
	bool is_precessing_collision)
	// Fix: initializer list now follows the member declaration order
	// (members are constructed in declaration order regardless of the order
	// written here, so mismatched order is misleading and triggers -Wreorder).
	:mIsInitialized(false),
	mIsCollision(is_precessing_collision),
	mConstraintDofs(0),
	mNumVertices(0),
	mTimeStep(time_step),
	mTime(0.0),
	mFrame(0),
	mMaxIteration(max_iteration),
	mMaxIteration1(max_iteration),
	mDampingCoefficient(damping_coeff),
	mNumContactVertices(0),
	mCollisionMesh(nullptr),
	mCollisionDetector(nullptr)
{
	mDof = 0;
	mStoreDof = 0;
	// Fix: these members were read elsewhere (solver loop, destructor) but
	// never initialized, which is undefined behavior if Initialize() was not
	// called first. Give them deterministic defaults.
	// NOTE(review): mAlpha = 1 means no under-relaxation in the collision
	// loop and mMaxIteration1 defaults to max_iteration — confirm intended
	// values with the callers that set them.
	mAlpha = 1.0;
	mAddTorque = 0.0;
	mFriction = 0.0;
	mMesh = nullptr;
	mObsMesh = nullptr;
}
// Releases everything the world owns: the collision structures allocated in
// Initialize() and the constraints whose ownership was transferred through
// AddBody()/AddConstraint().
template <typename TinyScalar, typename TinyConstants>
World<TinyScalar, TinyConstants>::
~World() {
	// NOTE(review): these pointers are only assigned in Initialize(); the
	// deletes below rely on them being null-initialized in the constructor
	// when the world was never initialized (delete on nullptr is a no-op).
	delete mCollisionMesh;
	delete mCollisionDetector;
	// Fix: mMesh and mObsMesh are allocated with new in Initialize() and
	// were previously never freed (memory leak).
	delete mMesh;
	delete mObsMesh;
	for(auto c : mConstraints)
	{
		delete c;
	}
}
// Builds all per-simulation state from the bodies added so far: lumped mass
// matrices, collision meshes and detector, rigid-joint storage, and the
// prefactorized projective-dynamics system (PreComputation()). Called once by
// the user; AddBody() re-invokes it when bodies are added after that.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
Initialize()
{
// mIsCollision = m_handle_self_collision = false;
mTime = 0.0;
// Total constraint dofs size the stacked projection vector d and matrix J.
mConstraintDofs = 0;
for(auto c : mConstraints)
{
mConstraintDofs += c->GetDof();
}
mV.resize(3*mNumVertices);
mV.setZero();
mIdentityMatrix.resize(3*mNumVertices,3*mNumVertices);
mMassMatrix.resize(3*mNumVertices,3*mNumVertices);
mInvMassMatrix.resize(3*mNumVertices,3*mNumVertices);
// Diagonal (lumped) mass, inverse-mass and identity matrices: one scalar
// per coordinate, three coordinates per vertex, from mUnitMass.
std::vector<Eigen::Triplet<TinyScalar>> i_triplets;
std::vector<Eigen::Triplet<TinyScalar>> m_triplets;
std::vector<Eigen::Triplet<TinyScalar>> inv_m_triplets;
i_triplets.reserve(3*mNumVertices);
m_triplets.reserve(3*mNumVertices);
inv_m_triplets.reserve(3*mNumVertices);
for(int i=0;i<mNumVertices;i++)
{
m_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+0,3*i+0,mUnitMass[i]));
m_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+1,3*i+1,mUnitMass[i]));
m_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+2,3*i+2,mUnitMass[i]));
inv_m_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+0,3*i+0,1.0/mUnitMass[i]));
inv_m_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+1,3*i+1,1.0/mUnitMass[i]));
inv_m_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+2,3*i+2,1.0/mUnitMass[i]));
i_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+0,3*i+0,1.0));
i_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+1,3*i+1,1.0));
i_triplets.push_back(Eigen::Triplet<TinyScalar>(3*i+2,3*i+2,1.0));
}
mMassMatrix.setFromTriplets(m_triplets.cbegin(), m_triplets.cend());
mInvMassMatrix.setFromTriplets(inv_m_triplets.cbegin(), inv_m_triplets.cend());
mIdentityMatrix.setFromTriplets(i_triplets.cbegin(), i_triplets.cend());
mExternalForces.resize(3*mNumVertices);
mExternalForces.setZero();
mQn.resize(3*mNumVertices);
mQn.setZero();
// Snapshot of the current state, restored by Reset().
mInitX = mX;
mInitV = mV;
// collision
// Double-buffered contact/friction force accumulators used by
// self-collision handling, sized per contact vertex.
if (m_handle_self_collision)
{
m_current_index = 0;
m_next_index = 1;
for (int i = 0; i < 2; ++i)
{
m_contact_forces[i].setZero(3, mNumContactVertices);
m_self_contact_forces[i].setZero(3, mNumContactVertices);
m_self_contact_repercusion_forces[i].setZero(3, mNumContactVertices);
m_alpha[i].setZero(3, mNumContactVertices);
} // i
} // m_handle_self_collision
// std::vector<Eigen::Matrix<TinyScalar, 3, 1>> positions;
// Flatten the contact faces into a plain triangle index list.
std::vector<int> triangles;
// for (int i = 0; i < mContactIdx.size(); i++) {
// int id = mContactIdx[i];
// Eigen::Matrix<TinyScalar, 3, 1> v(mX[3*id], mX[3*id+1], mX[3*id+2]);
// positions.push_back(v);
// }
for (int i = 0; i < mContactFace.size(); i++) {
triangles.push_back(mContactFace[i][0]);
triangles.push_back(mContactFace[i][1]);
triangles.push_back(mContactFace[i][2]);
}
// mCollisionMesh = new CollisionMesh<TinyScalar, TinyConstants>(positions, triangles);
// mCollisionDetector = new CollisionBrutal<TinyScalar, TinyConstants>(mCollisionMesh, true);
// NOTE(review): when Initialize() runs a second time (AddBody after
// initialization) the objects allocated below replace the previous ones
// without freeing them — confirm whether the old allocations should be
// deleted here first.
mCollisionMesh = new CollisionMesh<TinyScalar, TinyConstants>(mCollisionMeshPositions, triangles);
mObsMesh = new ArcsimMesh();
mObsMesh->Initialize(
helper::to_eigen_double<TinyScalar, TinyConstants>(mObsPositions),
helper::to_eigen_double<TinyScalar, TinyConstants>(mObsPositions),
mObsFace);
mMesh = new ArcsimMesh();
mMesh->Initialize(
helper::to_eigen_double<TinyScalar, TinyConstants>(mCollisionMeshPositions),
helper::to_eigen_double<TinyScalar, TinyConstants>(mCollisionMeshPositions),
mContactFace);
mCollisionDetector = new CollisionBrutal<TinyScalar, TinyConstants>(mCollisionMesh, true);
mCollisionDetector->mObsMesh = mObsMesh;
mCollisionDetector->mMesh = mMesh;
mCollisionDetector->m_generalized_positions.resize(mNumContactVertices*3);
mCollisionDetector->m_generalized_speeds.resize(mNumContactVertices*3);
mCollisionDetector->mCollisionMesh->m_positions.resize(mNumContactVertices*3);
mCollisionDetector->mCollisionMesh->m_speed.resize(mNumContactVertices*3);
m_rhs = Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic>::Zero(
3, mContactIdx.size());
mCollisionDetector->m_handle_self_collision = m_handle_self_collision;
mCollision_V_next.resize(mNumContactVertices*3);
mCollision_X_next.resize(mNumContactVertices*3);
std::cout<<"m_rhs rows: "<<m_rhs.rows()<<std::endl;
std::cout<<"m_rhs cols: "<<m_rhs.cols()<<std::endl;
// collision
// rigid bodies
// Per-link transforms plus generalized joint coordinate/velocity storage.
T.resize(mrigidBodies.size());
Tr.resize(mrigidBodies.size());
mJointQ.resize(mStoreDof);
mJointQd.resize(mStoreDof);
for (int i = 0; i < mrigidBodies.size(); ++i)
mlinks[i]->WriteT(mJointQ, mJointQd);
// One torque per joint (links minus the root); empty when no rigid bodies.
mJointTorque.resize(std::max(0, int(mrigidBodies.size()-1) ));
mJointTorque.setZero();
// rigid bodies
// PreComputation();
PreComputation();
mIsInitialized = true;
std::cout<<"Total degree of freedom : "<<mX.rows()<<std::endl;
std::cout<<"Total constraints : "<<mConstraints.size()<<std::endl;
}
// Restores the state snapshotted by Initialize(): positions, velocities,
// simulated time and the frame counter.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
Reset()
{
	mX = mInitX;
	mV = mInitV;
	mTime = 0.0;
	// Fix: the frame counter advanced by TimeStepping() was not reset,
	// leaving it inconsistent with mTime after a Reset().
	mFrame = 0;
	// NOTE(review): joint state (mJointQ/mJointQd) and link transforms are
	// not restored here — confirm whether rigid bodies should also reset.
}
// Effective (h^-2-scaled) diagonal coefficient of a contact vertex, read from
// the system matrix mH2ML at the vertex's first coordinate row/column.
template <typename TinyScalar, typename TinyConstants>
TinyScalar World<TinyScalar, TinyConstants>::
getVertexMass(size_t vertex_index) const
{
	const int coord = mContactIdx[vertex_index] * 3;
	return mH2ML.coeff(coord, coord);
}
// add cloth body
// Appends a deformable (cloth) body to the world.
// @param x0          stacked initial positions (3 * n_vertices)
// @param c           this body's constraints (ownership transfers to World)
// @param masses      per-vertex masses (size n_vertices)
// @param contactIdx  body-local indices of collision-participating vertices
// @param contactFace triangles of the collision surface (body-local indices)
// Re-runs Initialize() when the world was already initialized.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
AddBody(
	const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x0,
	const std::vector<Constraint<TinyScalar, TinyConstants>*>& c,
	const std::vector<TinyScalar>& masses,
	const std::vector<int>& contactIdx,
	const std::vector<Eigen::Vector3i>& contactFace)
{
	std::cout << contactFace.size() << " addbody contactFace\n";
	std::cout << contactIdx.size() << " addbody contactIdx\n";
	for (const auto& cidx : contactIdx) {
		int id = cidx + mNumVertices;
		// Perf fix: membership used to be tested with std::find over
		// mContactIdx (O(n) per vertex, O(n^2) per body). mMapAll2Contact is
		// maintained in lockstep with mContactIdx, so a map lookup is
		// equivalent and logarithmic.
		if (mMapAll2Contact.find(id) == mMapAll2Contact.end()) {
			mContactIdx.push_back(id);
			mMapAll2Contact.insert(std::make_pair(id, mNumContactVertices++));
			mCollisionMeshPositions.push_back(x0.template segment<3>(cidx*3));
		}
	}
	// Remap body-local face indices to compact contact-vertex indices.
	for (const auto& f : contactFace) {
		int f0 = mMapAll2Contact[f[0]+mNumVertices];
		int f1 = mMapAll2Contact[f[1]+mNumVertices];
		int f2 = mMapAll2Contact[f[2]+mNumVertices];
		mContactFace.push_back(Eigen::Vector3i(f0, f1, f2));
	}
	int nv = (x0.rows()/3);
	// Cloth vertices are all non-rigid degrees of freedom.
	for (int i = 0; i < nv*3; ++i)
		mnonrigidIdx.push_back(mNumVertices*3 + i);
	mNumVertices += nv;
	// Grow mX, keeping previous bodies' positions and appending x0.
	Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> tmpX = mX;
	mX.resize(mNumVertices*3);
	mX.head(tmpX.rows()) = tmpX;
	mX.tail(x0.rows()) = x0;
	// Shift the constraints' vertex indices by this body's global offset.
	for (auto con : c) {
		con->fixIndex(mNumVertices - nv);
	}
	mConstraints.insert(mConstraints.end(), c.begin(), c.end());
	for (int i = 0; i < nv; i++) {
		mUnitMass.push_back(masses[i]);
	}
	if (mIsInitialized)
		Initialize();
}
//add obstacle body
// Registers a static obstacle mesh: its vertices and faces only feed the
// collision structures and add no simulation degrees of freedom.
// Re-runs Initialize() when the world was already initialized.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
AddBody(
	const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x0,
	const std::vector<int>& contactIdx,
	const std::vector<Eigen::Vector3i>& contactFace)
{
	// Faces of this obstacle index into mObsPositions starting at `base`.
	int base = mObsPositions.size();
	for (const auto& cidx : contactIdx)
		mObsPositions.push_back(x0.template segment<3>(cidx*3));
	for (const auto& f : contactFace)
		mObsFace.push_back(Eigen::Vector3i(f[0] + base, f[1] + base, f[2] + base));
	if (mIsInitialized)
		Initialize();
}
//add rtq8 soft body
// Appends a soft body containing embedded rigid parts driven by articulated
// links. Registers the rigid groups, splits the dofs into rigid/non-rigid
// index sets, wires the links' offsets, then performs the same contact and
// position bookkeeping as the cloth AddBody overload.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
AddBody(
const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x0,
const std::vector<Constraint<TinyScalar, TinyConstants>*>& c,
const std::vector<TinyScalar>& masses,
const std::vector<int>& contactIdx,
const std::vector<Eigen::Vector3i>& contactFace,
const std::vector<std::vector<int>> &rigidBodyIndices,
const std::vector<int> &nonrigid,
const std::vector<int> &rigid,
const std::vector<Link<TinyScalar, TinyConstants>*> &links)
{
printf("AddBody rigid bodies\n");
int offset = mNumVertices, rigidoffset = mrigidIdx.size()/3;
for (int i = 0; i < rigidBodyIndices.size(); ++i) {
auto tmp = rigidBodyIndices[i];
// NOTE(review): the inner loop variable shadows the outer `i`; behavior is
// correct (the inner loop finishes before `links[i]` is read) but fragile.
for (int i = 0; i < tmp.size(); ++i)
tmp[i] += offset;
Link<TinyScalar, TinyConstants> *link = links[i];
link->bodyIdx = mrigidBodies.size();
mlinks.push_back(link);
link->rigidoffset = rigidoffset;
rigidoffset += tmp.size();
link->dofIdx = mDof;
link->storedofIdx = mStoreDof;
// Root links carry a free 6-dof transform (12 stored values); child links
// add a single joint coordinate each.
if (link->parent == NULL) {
mDof += 6;
mStoreDof += 12;
}
else {
mDof += 1;
mStoreDof += 1;
}
mrigidBodies.push_back(tmp);
mrigidSizes.push_back(tmp.size());
link->indices = tmp;
}
// Split this body's coordinates into the global non-rigid/rigid index sets.
for (int idx : nonrigid)
mnonrigidIdx.push_back(offset * 3 + idx);
for (int idx : rigid)
mrigidIdx.push_back(offset * 3 + idx);
// Sanity check: mrigidIdx must list each rigid vertex's 3 coordinates in
// the same order as mrigidBodies.
int k = 0;
for (auto &idxs : mrigidBodies) {
for (int i = 0; i < idxs.size(); ++i) {
if (mrigidIdx[k] != idxs[i]*3 || mrigidIdx[k+1] != idxs[i]*3+1 || mrigidIdx[k+2] != idxs[i]*3+2)
std::cout << "???" << mrigidIdx[k] << " " << idxs[i]<< std::endl;
k += 3;
}
}
// collision
std::cout << contactFace.size() << " addbody contactFace\n";
std::cout << contactIdx.size() << " addbody contactIdx\n";
// Register new contact vertices (deduplicated) and remap faces to compact
// contact-vertex indices; same scheme as the cloth AddBody overload.
for (const auto& cidx : contactIdx) {
int id = cidx + mNumVertices;
if (std::find(mContactIdx.begin(), mContactIdx.end(), id)
== mContactIdx.end() ) {
mContactIdx.push_back(id);
mMapAll2Contact.insert(std::make_pair(id, mNumContactVertices++));
mCollisionMeshPositions.push_back(x0.template segment<3>(cidx*3));
}
}
for (const auto& f : contactFace) {
int f0, f1, f2;
f0 = mMapAll2Contact[f[0]+mNumVertices];
f1 = mMapAll2Contact[f[1]+mNumVertices];
f2 = mMapAll2Contact[f[2]+mNumVertices];
mContactFace.push_back(Eigen::Vector3i(f0, f1, f2));
}
// collision end
// Append x0 to the global position vector and take over the constraints.
int nv=(x0.rows()/3);
mNumVertices+=nv;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> tmpX = mX;
// tmpX.resize(mNumVertices*3);
mX.resize(mNumVertices*3);
mX.head(tmpX.rows()) = tmpX;
mX.tail(x0.rows()) = x0;
for (auto con : c) {
con->fixIndex(mNumVertices - nv);
}
mConstraints.insert(mConstraints.end(),c.begin(),c.end());
for(int i=0;i<nv;i++){
mUnitMass.push_back(masses[i]);
}
if(mIsInitialized)
Initialize();
}
// Adds a single extra constraint after construction (ownership transfers to
// the world). When the world is already initialized, the constraint dof
// count and the prefactorized system are rebuilt immediately.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
AddConstraint(Constraint<TinyScalar, TinyConstants>* c)
{
	mConstraints.push_back(c);
	if (!mIsInitialized)
		return;
	mConstraintDofs = 0;
	for (auto constraint : mConstraints)
		mConstraintDofs += constraint->GetDof();
	PreComputation();
}
// Removes a previously added constraint, matched by pointer identity; the
// removed constraint is NOT deleted (ownership returns to the caller). When
// the world is initialized, the constraint dof count and the prefactorized
// system are rebuilt.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
RemoveConstraint(Constraint<TinyScalar, TinyConstants>* c)
{
	// Idiom: std::find + erase replaces the hand-rolled index loop.
	auto it = std::find(mConstraints.begin(), mConstraints.end(), c);
	if (it == mConstraints.end())
		return;
	mConstraints.erase(it);
	if (mIsInitialized) {
		mConstraintDofs = 0;
		for (auto constraint : mConstraints)
			mConstraintDofs += constraint->GetDof();
		PreComputation();
	}
}
// template <typename TinyScalar, typename TinyConstants>
// void World<TinyScalar, TinyConstants>::
// TimeStepping(bool isIntegrated)
// {
// if(!mIsInitialized) {
// std::cout<<"Engine not initialized."<<std::endl;
// return;
// }
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> x_n1(mNumVertices*3);
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> v_n1(mNumVertices*3);
// mV_without_collision = mV + mTimeStep*(mInvMassMatrix*mExternalForces);
// // (qt + hvt + h^2M^-1fext)
// mQn = mX + mTimeStep*mV_without_collision;
// mPosition_current_next = mQn;
// auto tmp = mTimeStep*(mInvMassMatrix*mExternalForces);
// x_n1=ProjectiveDynamicsMethod();
// // v_n1=ProjectiveDynamicsMethod();
// // UpdatePositionsAndVelocities(v_n1);
// UpdatePositionsAndVelocities(x_n1);
// // mV *= mDampingCoefficient;
// updateCollisionScene();
// if(isIntegrated)
// {
// mTime += mTimeStep;
// mFrame++;
// }
// }
// Advances the simulation by one step h:
//   1. reads joint state from the links,
//   2. applies external forces explicitly (v* = v + h M^-1 f_ext),
//   3. predicts q_n = x + h v*, runs the projective-dynamics solve,
//   4. commits positions/velocities, damps velocities, syncs the collision
//      scene, and writes joint state back to the links.
// @param isIntegrated when true, mTime and mFrame are advanced.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
TimeStepping(bool isIntegrated)
{
if(!mIsInitialized) {
std::cout<<"Engine not initialized."<<std::endl;
return;
}
// Pull current joint coordinates/velocities into each link.
for (int i = 0; i < mrigidBodies.size(); ++i)
mlinks[i]->ReadT(mJointQ, mJointQd);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> x_n1(mNumVertices*3);
// (qt + hvt + h^2M^-1fext)
// mExternalForces[0] = 1;
// std::cout << mInvMassMatrix << " mInvMassMatrix\n";
// std::cout << mV[1] << " mV\n";
// std::cout << mExternalForces[1] << " mExternalForces\n";
// Explicit external-force kick: v* = v + h M^-1 f_ext.
mV_without_collision = mV + mTimeStep*(mInvMassMatrix*mExternalForces);
// std::cout << mV[1] << " mV\n";
// std::cout << mExternalForces[1] << " mExternalForces\n";
// std::cout << mV_without_collision[1] << " mV_without_collision\n";
// (qt + hvt + h^2M^-1fext)
// Inertial prediction used as the solver's starting point / momentum term.
mQn = mX + mTimeStep*mV_without_collision;
updateCollisionScene();
// std::cout << mQn[1] << " mQn\n";
x_n1 = ProjectiveDynamicsMethod();
// std::cout << x_n1[1] << " x_n1\n";
// std::cout << mX[1] << " mX\n";
UpdatePositionsAndVelocities(x_n1);
mV *= mDampingCoefficient;
// std::cout << mV[1] << " mV 2\n";
updateCollisionScene();
// Push the (possibly updated) joint state back into storage.
for (int i = 0; i < mrigidBodies.size(); ++i)
mlinks[i]->WriteT(mJointQ, mJointQd);
if(isIntegrated)
{
mTime += mTimeStep;
mFrame++;
}
}
// Pushes the current world state (mX, mV) of every contact vertex into the
// collision detector's compact buffers, then clears the per-step
// contact/friction force accumulators used by self-collision handling.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
updateCollisionScene() noexcept
{
	for (int ci = 0; ci < mContactIdx.size(); ++ci) {
		const int vid = mContactIdx[ci];
		for (int axis = 0; axis < 3; ++axis) {
			mCollisionDetector->m_generalized_positions[3*ci+axis] = mX[3*vid+axis];
			mCollisionDetector->m_generalized_speeds[3*ci+axis] = mV[3*vid+axis];
			mCollisionDetector->mCollisionMesh->m_positions[3*ci+axis] = mX[3*vid+axis];
			mCollisionDetector->mCollisionMesh->m_speed[3*ci+axis] = mV[3*vid+axis];
		}
		mCollisionDetector->mMesh->nodes[ci]->x0 = Vec3(
			TinyConstants::getDouble(mX[3*vid+0]) ,
			TinyConstants::getDouble(mX[3*vid+1]) ,
			TinyConstants::getDouble(mX[3*vid+2]));
	}
	// Cleaning the forces for the next step
	if (m_handle_self_collision)
	{
		m_current_index = 0u;
		m_next_index = 1u;
		for (unsigned int buf = 0u; buf < 2u; ++buf)
		{
			m_contact_forces[buf].setZero();
			m_self_contact_forces[buf].setZero();
			m_self_contact_repercusion_forces[buf].setZero();
			m_alpha[buf].setZero();
		}
	}
}
// Commits the solver result: velocities become the backward finite
// difference (x_n1 - mX) / h and positions are replaced by x_n1.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
UpdatePositionsAndVelocities(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x_n1)
{
	const TinyScalar inv_h = 1.0 / mTimeStep;
	mV = (x_n1 - mX) * inv_h;
	mX = x_n1;
}
// template <typename TinyScalar, typename TinyConstants>
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> World<TinyScalar, TinyConstants>::
// ProjectiveDynamicsMethod()
// {
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> x_n1(3*mNumVertices);
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> x_n1_new(3*mNumVertices);
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> x_n1_new_v(3*mNumVertices);
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> b(3*mNumVertices);
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> d(3*mConstraintDofs);
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> rhs_speed(3*mNumVertices);
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> v_n1(3*mNumVertices);
// Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> v_n1_new(3*mNumVertices);
// d.setZero();
// b= (1.0/(mTimeStep*mTimeStep))*mMassMatrix*mQn;
// TinyScalar h2 = mTimeStep*mTimeStep;
// x_n1 = mQn;
// v_n1 = mV_without_collision;
// if(mIsCollision) {
// mCollisionDetector->BaseUpdateCollisions(mCollision_X_next);
// updateCollisionInIter(v_n1, x_n1);
// }
// int i;
// TinyScalar err = 100.;
// for(i=0; i<mMaxIteration; i++) {
// EvaluateDVector(x_n1,d);
// m_rhs_speed = mMassMatrix * mV_without_collision +
// mTimeStep * (mJ * d - mL * mX);
// if(mIsCollision) {
// convertCollisionIndex();
// CollisionHandling();
// convertCollisionIndexBack();
// }
// v_n1_new = mDynamicSolver.solve(m_rhs_speed/h2);
// x_n1_new = mX + v_n1_new*mTimeStep;
// if(mIsCollision) {
// updateCollisionInIter(v_n1_new, x_n1_new);
// }
// err = (x_n1_new - x_n1).norm()/x_n1.size();
// if(err < EPS) {
// break;
// }
// x_n1 = x_n1_new;
// }
// // if(mIsCollision) {
// // updateCollisionInIter(v_n1_new, x_n1_new);
// // }
// std::cout << "error " << err << " ";
// if(err < EPS)
// std::cout << "good!\n";
// else
// std::cout << "not converge!\n";
// return x_n1;
// }
// Runs the projective-dynamics solve for one time step and returns the new
// full position vector (3*mNumVertices).
// Phase 1: alternating local/global iterations on the permuted system where
//   non-rigid dofs come first — local step EvaluateDVector projects onto the
//   constraints, global step ComputeSolution solves for the free dofs (hvf)
//   and the rigid/link solution (s). Converges when successive full-space
//   iterates agree to EPS.
// Phase 2 (mIsCollision): repeats the iteration with collision response —
//   CollisionHandling modifies m_rhs_speed, which is under-relaxed by mAlpha
//   and folded back into the right-hand side until no new collisions appear.
template <typename TinyScalar, typename TinyConstants>
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> World<TinyScalar, TinyConstants>::
ProjectiveDynamicsMethod()
{
// Cross-call diagnostics counters (iteration statistics).
static int num_max = 0, num_iter = 0;
static double total_time = 0;
// Unknowns: non-rigid coordinates plus the links' generalized dofs.
int numUnknown = mnonrigidIdx.size() + mDof;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> x_n1(numUnknown);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> ori_x(3*mNumVertices);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> ori_x_old(3*mNumVertices);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> x_n1_new(numUnknown);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> b(3*mNumVertices);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> d(3*mConstraintDofs);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> ori_c, s, xf, xf_old(mnonrigidIdx.size());
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> hvf(mnonrigidIdx.size());
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> deltar(mnonrigidIdx.size());
deltar.setZero();
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> rhs_speed(3*mNumVertices);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> v_n1(3*mNumVertices);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> v_n1_new(3*mNumVertices);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> prev_mrhsspeed(3*mNumVertices);
mNonrigidX.resize(mnonrigidIdx.size());
d.setZero();
prev_mrhsspeed.setZero();
// Momentum term of the global step: (1/h^2) M q_n.
b = (1.0 / (mTimeStep * mTimeStep)) * mMassMatrix * mQn;
// std::cout << mTimeStep << " mTimeStep\n";
// std::cout << mQn[1] << " mQn\n";
// std::cout << b[1] << " b\n";
TinyScalar h2 = mTimeStep*mTimeStep;
v_n1 = mV_without_collision;
// Initial guess: the inertial prediction, permuted so the non-rigid block
// comes first.
x_n1 = (mperm * mQn).template segment(0, mnonrigidIdx.size());
xf_old = x_n1;
mNonrigidX = (mperm * mX).template segment(0,mnonrigidIdx.size());
if(mIsCollision) {
// mCollisionDetector->BaseUpdateCollisions(mCollision_X_next);
updateCollisionInIter(mV_without_collision, mQn);
}
// std::cout << "\tmv0 "<<((mTimeStep*mV_without_collision).template segment<3>(353*3)).transpose() << std::endl;
ori_x_old.fill(0.);
for (int i = 0; i < mrigidBodies.size(); ++i)
mlinks[i]->guessNext();
// NOTE(review): this outer `err` is shadowed by the one declared inside the
// phase-1 loop below; it is only assigned in the collision loop.
TinyScalar err;
// --- Phase 1: collision-free local/global iterations ---
for(int i = 0; i < mMaxIteration; i++) {
ComputeV0NTr();
ori_x = ComputeOriX(x_n1);
// std::cout << "ori_x "<< i << " " << ori_x[1] << std::endl;
TinyScalar err = (ori_x - ori_x_old).norm()/ori_x.size();
if (err < EPS) {
// std::cout << "iter i0=" << i << std::endl;
break;
}
ori_x_old = ori_x;
// Local step: project onto constraints.
EvaluateDVector(ori_x,d);
ori_c = b + mJ * d;
// std::cout << "b "<< i << " " << b[1] << std::endl;
// std::cout << "d "<< i << " " << d[1] << std::endl;
// std::cout << "hvf0 "<< i << " " << hvf[1] << std::endl;
// std::cout << "deltar "<< i << " " << deltar[1] << std::endl;
// std::cout << "ori_c "<< i << " " << ori_c[1] << std::endl;
// Global step: solve for free-dof displacement hvf and rigid solution s.
ComputeSolution(deltar, ori_c, hvf, s);
// std::cout << "\thvf0 "<<(hvf.template segment<3>(353*3)).transpose() << std::endl;
xf = mNonrigidX + hvf;
// std::cout << "mNonrigidX "<< i << " " << mNonrigidX[1] << std::endl;
// std::cout << "hvf "<< i << " " << hvf[1] << std::endl;
x_n1_new << xf, s;
x_n1 = x_n1_new;
}
ComputeV0NTr();
// std::cout << x_n1_new[1] << " x_n1_new\n";
// NOTE(review): x_n1_new (and s) are only assigned inside the loop body; if
// mMaxIteration were 0 or the loop exited on its first convergence check,
// they would be used uninitialized here — confirm the intended invariant.
ori_x = ComputeOriX(x_n1_new);
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> old_hvf = hvf;
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> old_oric = ori_c;
// std::cout << mIsCollision << " mIsCollision\n";
// --- Phase 2: re-solve with collision response until no new collisions ---
if (mIsCollision) {
mCollisionDetector->ClearCollisions();
ori_x_old.setZero();
while (true) {
// printf("mIsCollision\n");
auto tmpv = (ori_x - mX) / mTimeStep;
updateCollisionInIter(tmpv, ori_x);
for (int i = 0; i < mContactIdx.size(); i++) {
mCollisionDetector->mMesh->nodes[i]->x = Vec3(
TinyConstants::getDouble( mCollision_X_next[3*i+0]),
TinyConstants::getDouble( mCollision_X_next[3*i+1]),
TinyConstants::getDouble( mCollision_X_next[3*i+2]));
}
// Exit when the detector reports no (new) collisions for this iterate.
if (mCollisionDetector->BaseUpdateCollisions(mCollision_X_next))
break;
// std::cout << "collision start!" << std::endl;
++num_max;
num_iter += 100;
bool flag = true;
for (int i = 0; i < mMaxIteration1; ++i) {
ComputeV0NTr();
ori_x = ComputeOriX(x_n1);
err = (ori_x - ori_x_old).cwiseAbs().maxCoeff();//.norm()/ori_x.size();//
// std::cout << "err1 " << err << std::endl;
// std::cout << "\tori_x "<<(ori_x.template segment<3>(mContactIdx[86]*3)).transpose() << std::endl;
if (err < EPS && flag) {
--num_max;
num_iter += i - 100;
// std::cout << "iter i1=" << i << std::endl;
break;
}
EvaluateDVector(ori_x,d);
auto tmpv = (ori_x - mX) / mTimeStep;
updateCollisionInIter(tmpv, ori_x);
// std::cout << (tmpv.template segment<3>(mContactIdx[38]*3)).transpose() << std::endl;
// std::cout << ori_x.segment<3>(mContactIdx[689]*3).transpose() << std::endl;
// m_rhs_speed = mMassMatrix * mV_without_collision;
m_rhs_speed = mMassMatrix * mV_without_collision +
mTimeStep * (mJ * d - mL * mX);
// Off-diagonal coupling term of the split system matrix.
auto tmp = mTimeStep * mC * tmpv * mTimeStep;
// std::cout <<"m_rhs_speed bef "<< (m_rhs_speed.template segment<3>(mContactIdx[656]*3)).transpose() << std::endl;
m_rhs_speed -= tmp;
// std::cout <<"m_rhs_speed aft "<< (m_rhs_speed.template segment<3>(mContactIdx[656]*3)).transpose() << std::endl;
// std::cout <<"ori_x "<< (ori_x.template segment<3>(mContactIdx[656]*3)).transpose() << std::endl;
if (i == 0)
prev_mrhsspeed = m_rhs_speed;
// m_rhs_speed = mperm * m_rhs_speed;
// m_rhs_speed.segment(0, mnonrigidIdx.size()) -= tmp;
// m_rhs_speed = mperm.transpose() * m_rhs_speed;
// for (int i = 0; i < mnonrigidIdx.size(); i++) {
// m_rhs_speed[mnonrigidIdx[i]] -= tmp[i];
// }
// CollisionHandling works on the compact contact indexing of m_rhs.
convertCollisionIndex();
std::vector<int> cindices = CollisionHandling();
flag = true;
// for (int i : cindices) {
// auto tmp = (ori_x - ori_x_old).template segment<3>(mContactIdx[i]*3);
// // std::cout << i << " " << mContactIdx[i]*3 << " "<< tmp.transpose() << std::endl;
// if (tmp.norm() > 1e-5)
// flag = false;
// }
// std::cout << flag << std::endl;
// m_rhs_speed.setZero();
convertCollisionIndexBack();
//mrhsspeed is delta
// auto diff = m_rhs_speed - prev_mrhsspeed;
// std::cout << diff.norm() << std::endl;
// std::cout << diff.segment<3>(mContactIdx[406]*3).transpose() << std::endl;
// int idx = 0;
// for (int i = 0; i < mNumVertices*3; ++i)
// if (std::abs(diff[i])>std::abs(diff[idx]))
// idx = i;
// std::cout << "argmax= "<<idx<<" : "<<diff[idx]<<" "<< m_rhs_speed[idx]<<" "<<prev_mrhsspeed[idx]<< std::endl;
// Under-relaxation of the collision rhs by mAlpha for stability.
TinyScalar alpha = mAlpha;
prev_mrhsspeed = (m_rhs_speed * alpha + prev_mrhsspeed * (1 - alpha));
ori_c = b + mJ * d + prev_mrhsspeed / mTimeStep;
// std::cout << prev_mrhsspeed.norm() << std::endl;
// std::cout << (old_oric - ori_c).norm() << std::endl;
// std::cout <<"m_rhs_speed "<< (m_rhs_speed.template segment<3>(mContactIdx[656]*3)).transpose() << std::endl;
// ComputeSolution_collision(ori_c, hvf, hvf, s);
// deltar = mTimeStep * mLf * (hvf - old_hvf);
// old_hvf = hvf;
// ComputeSolution(deltar, ori_c, hvf, s);
// Diagonal-only solve (mDynamicSolverM factors the diagonal of mH2ML).
hvf = mDynamicSolverM.solve((mperm * prev_mrhsspeed / mTimeStep).segment(0, mnonrigidIdx.size()));
// std::cout <<"hvf "<< (hvf.template segment<3>(mContactIdx[449]*3)).transpose() << std::endl;
// std::cout <<"hvf "<< (hvf.template segment<3>(mContactIdx[515]*3)).transpose() << std::endl;
// std::cout <<"hvf "<< (hvf.template segment<3>(mContactIdx[517]*3)).transpose() << std::endl;
// std::cout <<"hvf "<< (hvf.template segment<3>(mContactIdx[656]*3)).transpose() << std::endl;
// std::cout <<"mass "<< mH2ML.coeff(mContactIdx[656]*3,mContactIdx[656]*3) << std::endl;
xf = mNonrigidX + hvf;
x_n1_new << xf, s;
x_n1 = x_n1_new;
ori_x_old = ori_x;
}
}
}
// ComputeV0NTr();
// ori_x = ComputeOriX(x_n1_new);
return ori_x;
}
// Gather: copies each contact vertex's 3-vector from the global rhs-speed
// vector (m_rhs_speed) into the compact 3xN contact matrix m_rhs.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
convertCollisionIndex() {
	const int ncontact = mContactIdx.size();
	for (int ci = 0; ci < ncontact; ++ci) {
		const int vid = mContactIdx[ci];
		m_rhs.template block<3,1>(0,ci) = m_rhs_speed.template block<3,1>(vid*3,0);
	}
}
// Gathers the contact vertices' candidate velocity/position (v, x) from the
// global vectors into the compact buffers the collision detector consumes.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
updateCollisionInIter(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& v, const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x) {
	const int ncontact = mContactIdx.size();
	for (int ci = 0; ci < ncontact; ++ci) {
		const int vid = mContactIdx[ci];
		mCollision_V_next.template block<3,1>(ci*3,0) = v.template block<3,1>(vid*3,0);
		mCollision_X_next.template block<3,1>(ci*3,0) = x.template block<3,1>(vid*3,0);
	}
}
// Scatter: inverse of convertCollisionIndex() — writes the compact 3xN
// contact matrix m_rhs back into the global vector m_rhs_speed.
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
convertCollisionIndexBack() {
	const int ncontact = mContactIdx.size();
	for (int ci = 0; ci < ncontact; ++ci) {
		const int vid = mContactIdx[ci];
		m_rhs_speed.template block<3,1>(vid*3,0) = m_rhs.template block<3,1>(0,ci);
	}
}
// template <typename TinyScalar, typename TinyConstants>
// void World<TinyScalar, TinyConstants>::
// PreComputation()
// {
// EvaluateJMatrix(mJ);
// EvaluateLMatrix(mL);
// EvaluateAMatrix();
// Eigen::SparseMatrix<TinyScalar> H2ML = (1.0/(mTimeStep*mTimeStep))*mMassMatrix+mL;
// printf("\n############### mL\n");
// for (int i = 0; i < 10; i++) { // for debug testing
// printf("(%f) ", mL.coeff(3*i, 3*i));
// }
// printf("\n############### mMassMatrix\n");
// for (int i = 0; i < 10; i++) { // for debug testing
// printf("(%f) ", mMassMatrix.coeff(3*i, 3*i));
// }
// printf("\n############### H2ML\n");
// Eigen::SparseMatrix<TinyScalar> h2 = H2ML*(mTimeStep*mTimeStep);
// for (int i = 0; i < 10; i++) { // for debug testing
// printf("(%f) ", h2.coeff(3*i, 3*i));
// }
// mMh2L = h2;
// FactorizeLDLT(H2ML,mDynamicSolver);
// FactorizeLDLT(mL,mQuasiStaticSolver);
// }
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
PreComputation()
{
    // One-time setup for the projective-dynamics solve: assemble the system
    // matrix (1/h^2) M + L, permute it into [non-rigid | rigid] block order,
    // factorize the non-rigid block, and precompute the Schur complement used
    // for the rigid degrees of freedom.
    EvaluateJMatrix(mJ);
    EvaluateLMatrix(mL);
    EvaluateAMatrix();
    // mH2ML = (1.0/(mTimeStep*mTimeStep))*mMassMatrix+mL;
    h2Mf = (1.0/(mTimeStep*mTimeStep))*mMassMatrix;   // (1/h^2) M
    mH2ML = h2Mf + mL;                                // (1/h^2) M + L
    //split
    Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic> perm(3*mNumVertices);
    Eigen::SparseMatrix<TinyScalar> tmp = mH2ML;
    // prune predicates: lmd0 keeps only diagonal entries, lmd1 only off-diagonal.
    auto lmd0 = [](const Eigen::Index& row, const Eigen::Index& col, const auto&){return row==col;};
    auto lmd1 = [](const Eigen::Index& row, const Eigen::Index& col, const auto&){return row!=col;};
    const int nnon = mnonrigidIdx.size(), nrig = mrigidIdx.size();
    // tmpp maps new (permuted) index -> old dof index: non-rigid dofs first,
    // then rigid dofs.
    Eigen::VectorXi tmpp(3*mNumVertices);
    for (int i = 0; i < nnon; ++i)
        tmpp[i] = mnonrigidIdx[i];
    for (int i = 0; i < nrig; ++i)
        tmpp[i + nnon] = mrigidIdx[i];
    //column-major p
    // invert the map so perm sends old dof tmpp[i] to new position i
    for (int i = 0; i < 3*mNumVertices; ++i)
        perm.indices()[tmpp[i]] = i;
    tmp = mH2ML.twistedBy(perm);
    // Block structure of the permuted system: [ Qf  Qc_L^T ; Qc_L  Qb_M ]
    Qf = tmp.block(0,0,nnon, nnon);        // non-rigid / non-rigid block
    Qc_L = tmp.block(nnon, 0,nrig, nnon);  // rigid / non-rigid coupling
    Qb_M = tmp.block(nnon, nnon,nrig, nrig); // rigid / rigid block
    tmp = mH2ML;
    tmp.prune(lmd1);
    mC = tmp*1;    // off-diagonal part of (1/h^2) M + L (the *1 forces a deep copy)
    mMh2L = mH2ML*(mTimeStep*mTimeStep);   // M + h^2 L
    FactorizeLDLT(Qf,mDynamicSolver);
    tmp = mH2ML;
    tmp.prune(lmd0);
    // diagonal-only part, factorized separately for the split solver
    FactorizeLDLT(tmp,mDynamicSolverM);
    // Schur complement of Qf: Qb_M - Qc_L Qf^{-1} Qc_L^T, used in ComputeSolution
    QbMmQcLQfm1QcLT = Qb_M - Qc_L * mDynamicSolver.solve(
        Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic>(Qc_L.transpose()));
    // FactorizeLDLT(H2ML,mDynamicSolver);
    // FactorizeLDLT(mL,mQuasiStaticSolver);
    mperm = perm;
}
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
EvaluateJMatrix(Eigen::SparseMatrix<TinyScalar>& J)
{
    // Assemble the global constraint Jacobian J (3*nVertices x 3*nDofs).
    // Each constraint appends its triplets starting at its dof offset.
    J.resize(3*mNumVertices, 3*mConstraintDofs);
    std::vector<Eigen::Triplet<TinyScalar>> triplets;
    int offset = 0;
    for (auto& c : mConstraints)
    {
        c->EvaluateJMatrix(offset, triplets);
        offset += c->GetDof();
    }
    J.setFromTriplets(triplets.cbegin(), triplets.cend());
}
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
EvaluateAMatrix()
{
    // Build m_ATA: the restriction of the Laplacian mL to the contact
    // vertices (entry (i,j) = mL(3*id, 3*jd) for contact vertices id, jd),
    // with the diagonal left out.
    //
    // Fixes vs. the previous version: reads mL directly instead of taking a
    // full deep copy into a temporary; hoists the loop-invariant
    // mContactIdx[i] lookup out of the inner loop; performs a single sparse
    // coeff() lookup per pair instead of two.
    m_ATA.resize(mNumContactVertices, mNumContactVertices);
    // m_A.resize(mConstraintDofs, mNumContactVertices);
    for (int i = 0; i < mNumContactVertices; i++) {
        const int id = mContactIdx[i];
        for (int j = 0; j < mNumContactVertices; j++) {
            const int jd = mContactIdx[j];
            if (id == jd) continue;           // skip the diagonal
            // if (id == -1 || jd == -1) continue;
            const TinyScalar w = mL.coeff(3*id, 3*jd);
            if (w == 0)
                continue;                     // preserve sparsity: only store structural nonzeros
            m_ATA.coeffRef(i,j) = w;
        }
    }
}
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
EvaluateLMatrix(Eigen::SparseMatrix<TinyScalar>& L)
{
    // Assemble the global weighted Laplacian L (3*nVertices x 3*nVertices)
    // by accumulating every constraint's local contribution as triplets.
    L.resize(3*mNumVertices, 3*mNumVertices);
    std::vector<Eigen::Triplet<TinyScalar>> triplets;
    for (std::size_t k = 0; k < mConstraints.size(); ++k)
        mConstraints[k]->EvaluateLMatrix(triplets);
    L.setFromTriplets(triplets.begin(), triplets.end());
}
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
EvaluateDVector(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x,Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& d)
{
    // Evaluate every constraint's projection ("d" vector) at configuration x,
    // then pack the per-constraint results contiguously into d.
    d.resize(mConstraintDofs*3);
    int n = mConstraints.size();
    // #pragma omp parallel for
    for(int i=0;i<n;i++)
    {
        // Phase 1: each constraint caches its own projection; these are
        // independent, hence the (currently disabled) parallel pragma.
        mConstraints[i]->EvaluateDVector(x);
    }
    int index = 0;
    for(auto& c : mConstraints)
    {
        // Phase 2: sequential gather. GetDVector presumably advances `index`
        // by the constraint's dof count via the reference — TODO confirm.
        c->GetDVector(index,d);
    }
}
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
FactorizeLDLT(const Eigen::SparseMatrix<TinyScalar>& A,Eigen::SimplicialLDLT<Eigen::SparseMatrix<TinyScalar>>& ldltSolver)
{
    // Compute an LDLT factorization of A. If A is (numerically) indefinite
    // or singular, retry with increasing Tikhonov damping: A + reg*I.
    //
    // Fix: the retry loop previously had no upper bound, so a matrix that
    // never factorizes (e.g. containing NaNs) would spin forever. We now
    // bail out once the damping exceeds reg_max and report the failure.
    Eigen::SparseMatrix<TinyScalar> A_prime = A;
    ldltSolver.analyzePattern(A_prime);
    ldltSolver.factorize(A_prime);
    TinyScalar reg = 1E-6;
    bool success = true;
    const TinyScalar reg_max = 1E12;  // give up past this damping level
    while (ldltSolver.info() != Eigen::Success)
    {
        reg *= 10;
        if (reg > reg_max)
        {
            // Factorization is hopeless; leave the solver in its failed
            // state rather than looping forever.
            std::cout << "factorize failed even with damping " << reg << std::endl;
            break;
        }
        A_prime = A + reg*mIdentityMatrix;
        ldltSolver.factorize(A_prime);
        success = false;
    }
    if (!success)
        std::cout << "factorize failure (damping : " << reg<<" )"<<std::endl;
    // else
    //     std::cout << "factorize success\n";
}
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
SetExternalForce(Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> external_force)
{
    // Store the per-dof external force vector used by the time stepper.
    // Taken by value (copied into the member); length is the caller's
    // responsibility — presumably 3*mNumVertices, TODO confirm.
    mExternalForces = external_force;
}
// collision ----------------------------------------------------------------------------------------------------------
// BLOCK(m, id): the 3x1 column of matrix m holding vertex `id`'s data
// (force matrices below are laid out 3 x nVertices, one column per vertex).
#define BLOCK(m, id) ((m).col((id)))
// LVAL(m, id, cmp): lvalue access to component `cmp` (0..2) of vertex `id`.
#define LVAL(m, id, cmp) ((m)((cmp), (id)))
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
do_solve_friction(
    Eigen::Matrix<TinyScalar, 3, 1> &p0,          // momentum of the face aggregate (local contact basis)
    TinyScalar m0,                                // mass of the face aggregate
    Eigen::Matrix<TinyScalar, 3, 1> &r0,          // out: reaction force on the face aggregate
    std::vector<Eigen::Matrix<TinyScalar, 3, 1>> &ps, // momenta of the contacting vertices
    std::vector<TinyScalar> &ms,                  // masses of the contacting vertices
    std::vector<Eigen::Matrix<TinyScalar, 3, 1>> &rs, // in/out: per-vertex reaction forces (component 0 = normal)
    std::vector<bool> &stick,                     // per-vertex stick/slide classification
    TinyScalar mu) {                              // friction coefficient
    // Solve one contact group's friction: sticking vertices are forced to the
    // common tangential velocity; sliding vertices get a Coulomb force mu*fn
    // opposing relative motion. Components: [0] = normal, [1],[2] = tangent
    // (everything here is expressed in the local contact basis).
    //compute stick p and m
    Eigen::Matrix<TinyScalar, 3, 1> pstick = p0;
    TinyScalar mstick = m0;
    int n = stick.size();
    for (int k = 0; k < n; ++k)
        if (stick[k]) {
            pstick += ps[k];
            mstick += ms[k];
        }
    //compute slide p and m and fn
    Eigen::Matrix<TinyScalar, 3, 1> pslide(0.,0.,0.);
    TinyScalar mslide = 0, fn = 0;
    for (int k = 0; k < n; ++k)
        if (!stick[k]) {
            pslide += ps[k];
            mslide += ms[k];
            fn += rs[k][0];   // total normal force carried by sliding verts
        }
    //compute stick force:
    // common tangential velocity of the stuck group; forces bring each stuck
    // member exactly to that velocity.
    Eigen::Matrix<TinyScalar, 3, 1> v = pstick / mstick;
    r0[1] = v[1] * m0 - p0[1];
    r0[2] = v[2] * m0 - p0[2];
    for (int k = 0; k < n; ++k)
        if (stick[k]) {
            rs[k][1] = v[1] * ms[k] - ps[k][1];
            rs[k][2] = v[2] * ms[k] - ps[k][2];
        }
    if (mslide > 0) {
        //compute relative v direction, and normalize
        // NOTE(review): if the tangential relative velocity is ~0, t_len
        // underflows and the division blows up — TODO confirm callers
        // guarantee sliding verts have nonzero tangential motion.
        Eigen::Matrix<TinyScalar, 3, 1> rel_v = pslide / mslide - pstick / mstick;
        TinyScalar t_len = sqrt(rel_v[1]*rel_v[1] + rel_v[2]*rel_v[2]);
        rel_v /= t_len;
        //apply sliding frictional force to stuck verts
        TinyScalar mult = mu*fn/(mslide+mstick);
        r0[1] += rel_v[1] * mult * m0;
        r0[2] += rel_v[2] * mult * m0;
        for (int k = 0; k < n; ++k)
            if (stick[k]) {
                rs[k][1] += rel_v[1] * mult * ms[k];
                rs[k][2] += rel_v[2] * mult * ms[k];
            }
        //apply sliding frictional force to slide verts
        Eigen::Matrix<TinyScalar, 3, 1> abs_v0 = (p0 + r0) / m0;
        for (int k = 0; k < n; ++k)
            if (!stick[k]) {
                Eigen::Matrix<TinyScalar, 3, 1> rel_v = ps[k] / ms[k] - abs_v0;
                TinyScalar t_len = sqrt(rel_v[1]*rel_v[1] + rel_v[2]*rel_v[2]);
                rel_v /= t_len;
                // Coulomb force opposing the vertex's motion relative to the face
                TinyScalar mult = -mu*rs[k][0] * ms[k] / (ms[k] + mstick);
                rs[k][1] = mult*rel_v[1];
                rs[k][2] = mult*rel_v[2];
            }
    }
    //sanity check: internal friction forces should sum to zero
    // (Newton's third law; the original comment said "one", but the assert
    // below clearly checks for zero.)
    Eigen::Matrix<TinyScalar, 3, 1> rsum = r0;
    for (int k = 0; k < n; ++k)
        rsum += rs[k];
    assert(rsum.norm() < 1e-8);
}
template <typename TinyScalar, typename TinyConstants>
std::vector<int> World<TinyScalar, TinyConstants>::
CollisionHandling() {
    // Resolve contact (and optionally self-contact) friction forces and add
    // them to m_rhs (3 x nVertices force matrix). Returns the indices of
    // vertices found in external contact this step.
    //
    // External contacts: per-vertex Coulomb model in the local contact basis
    // (component 0 = normal), clamping the tangential reaction to the
    // friction cone. Self contacts: a vertex-vs-face group solve delegated
    // to do_solve_friction().
    const unsigned int nbCollisions = mCollisionDetector->m_collisions_infos.size();
    const unsigned int nbSelfCollisions = mCollisionDetector->m_self_collisions_infos.size();
    // Abort if there is no collisions
    // std::cout << nbSelfCollisions << " nbSelfCollisions \n";
    // std::cout << nbCollisions << " nbCollisions \n";
    if ((nbCollisions == 0) && (!m_handle_self_collision || (nbSelfCollisions == 0)))
    {
        // m_rhs.setZero();
        return {};
    }
    std::vector<int> ans;
    TIMER_START(friction);
    // Reconstruct the forces without any friction force
    Eigen::Matrix<TinyScalar, 3, Eigen::Dynamic> forces = m_rhs;
    // m_rhs.setZero();
    // const auto v = Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic>::Map(mCollision_V_next.data(), 3, mNumContactVertices);
    // std::cout << mNumContactVertices << " mNumContactVertices\n";
    // std::cout << mCollision_V_next.rows() << "!" << mCollision_V_next.cols() << "\n";
    // std::cout << v.rows() << " 1 " << v.cols() << "\n";
    // std::cout << m_ATA.rows() << " 2 " << m_ATA.cols() << "\n";
    // std::cout << forces.rows() << " 3 " << forces.cols() << "\n";
    // std::cout << v.block<3,1>(0,0) << " v\n";
    // std::cout << mCollision_V_next.block<3,1>(0,0) << " v\n";
    // exit(0);
    // #pragma omp parallel for
    // for (size_t cmp = 0u; cmp < 3u; ++cmp)
    // {
    //     forces.row(cmp) -=
    //         // std::pow(m_time_step, 2) * (getATA() * v.row(cmp).transpose()).transpose();
    //         //// ! better perf !
    //         std::pow(mTimeStep, 2) * v.row(cmp) * m_ATA;
    // }
    // Estimated friction force
    // #pragma omp parallel for
    for (unsigned int cId = 0u; cId < nbCollisions; ++cId)
    {
        // Data
        const CollisionInfo<TinyScalar, TinyConstants> & collision_info =
            mCollisionDetector->m_collisions_infos[cId];
        const size_t vId = collision_info.vertex_index;
        const Eigen::Matrix<TinyScalar, 3, 3>& local_basis = mCollisionDetector->m_local_contact_basis[cId];
        const TinyScalar mu = collision_info.friction_coefficient;
        // Converting the rhs in the local basis
        // Contact force also sees the previous self collision forces
        Eigen::Matrix<TinyScalar, 3, 1> forceLoc;
        if (!m_handle_self_collision)
        {
            forceLoc = local_basis.transpose() *
                (BLOCK(forces, vId));// - getVertexMass(vId) * collision_info.speed);
        }
        else
        {
            forceLoc = local_basis.transpose() *
                (BLOCK(forces, vId) +
                 BLOCK(m_self_contact_forces[m_current_index], vId)
                );
        }
        // Estimating the reaction
        ans.push_back(vId);
        if (forceLoc[0] > 0.)
        {
            // take-off
            // positive normal force: the vertex is separating, no reaction
            continue;
        }
        Eigen::Matrix<TinyScalar, 3, 1> r(0., 0., 0.);
        // rN must prevent the penetration
        r[0] = -forceLoc[0];
        // rT try to prevent the sliding
        // Sticking
        r[1] = -forceLoc[1];
        r[2] = -forceLoc[2];
        const TinyScalar rT_norm = sqrt(r[1] * r[1] + r[2] * r[2]);
        if (rT_norm > mu * r[0])
        {
            // but gets stuck at the border of the cone
            // Sliding: scale the tangential reaction back onto the cone
            r[1] *= mu * r[0] / rT_norm;
            r[2] *= mu * r[0] / rT_norm;
        }
        // Converting r in the global frame
        r = local_basis * r;
        if ((!m_handle_self_collision) || (nbSelfCollisions == 0))
        {
            // Adding it directly to the rhs
            for (unsigned int cmp = 0u; cmp < 3u; ++cmp)
            {
                //#pragma omp atomic update Not needed : max 1 contact per vertex
                LVAL(m_rhs, vId, cmp) = r[cmp];
            } // cmp
        }
        if (m_handle_self_collision)
        {
            // Storing it
            for (unsigned int cmp = 0u; cmp < 3u; ++cmp)
            {
                LVAL(m_contact_forces[m_next_index], vId, cmp) = r[cmp];
            } // cmp
        } // self
    } // cId
    const TinyScalar duration_friction = TIMER_DURATION(friction, microseconds);
    m_friction_times.push_back(duration_friction);
#ifdef TIMER_PRINT
    std::cout << "# Rhs friction : " << duration_friction << " µs" << std::endl;
#endif // TIMER_PRINT
    if ((m_handle_self_collision) && (nbSelfCollisions > 0))
    {
        TIMER_START(self_friction);
        // Estimated self friction force
        // for (size_t level = 0u; level < mCollisionDetector->m_collision_computation_order.size(); ++level)
        {
            // const std::vector<size_t>& collisions_level_ids = mCollisionDetector->m_collision_computation_order[level];
            std::vector<SelfForceToAdd<TinyScalar, TinyConstants> > forces_to_add;
            // forces_to_add.resize(collisions_level_ids.size());
            forces_to_add.resize(nbSelfCollisions);
            // #pragma omp parallel for
            for (unsigned int i = 0u; i < forces_to_add.size(); ++i)
            // for (unsigned int i = 0u; i < collisions_level_ids.size(); ++i)
            {
                // const size_t scId = collisions_level_ids[i];
                const size_t scId = i;
                // Data
                const SelfCollisionInfo<TinyScalar, TinyConstants>& self_collision_info =
                    mCollisionDetector->m_self_collisions_infos[scId];
                const size_t vId = self_collision_info.vertex_index;
                std::vector<size_t> vId_list_old = self_collision_info.vertex_index_list;
                const std::array<int, 3> fId = self_collision_info.face_indices;
                const Eigen::Matrix<TinyScalar, 3, 3>& local_basis = mCollisionDetector->m_local_self_contact_basis[scId];
                const Eigen::Matrix<TinyScalar, 3, 1>& alpha = self_collision_info.barycentric_coordinates;
                TinyScalar mu = mFriction; //self_collision_info.friction_coefficient;
                //compute p and m and transfer to local basis
                // f0/m0 aggregate the three face vertices into one body
                TinyScalar m0 = getVertexMass(fId[0]) + getVertexMass(fId[1]) + getVertexMass(fId[2]);
                Eigen::Matrix<TinyScalar, 3, 1> f0(0.,0.,0.), r0(0.,0.,0.);
                // NOTE(review): the `int i` below shadows the outer loop's
                // `unsigned int i`; intentional here, but fragile.
                for (int i = 0; i < 3; ++i) {
                    f0 += (BLOCK(forces, fId[i]) + BLOCK(m_contact_forces[m_next_index], fId[i]));
                }
                f0 = local_basis.transpose() * f0;
                // keep only vertices actually approaching the face (normal
                // relative "velocity" <= 0)
                std::vector<size_t> vId_list;
                for (int k : vId_list_old) {
                    Eigen::Matrix<TinyScalar, 3, 1> f = local_basis.transpose() * (BLOCK(forces, k) + BLOCK(m_contact_forces[m_next_index], k));
                    Eigen::Matrix<TinyScalar, 3, 1> rel_v = f / getVertexMass(k) - f0 / m0;
                    if (rel_v[0] <= 0)
                        vId_list.push_back(k);
                }
                std::vector<TinyScalar> ms;
                std::vector<Eigen::Matrix<TinyScalar, 3, 1>> fs, rs(vId_list.size());
                TinyScalar smv = 0., sm = 0.;
                for (int k : vId_list) {
                    ms.push_back(getVertexMass(k));
                    sm += ms.back();
                    fs.push_back(local_basis.transpose() * (BLOCK(forces, k) + BLOCK(m_contact_forces[m_next_index], k)));
                    smv += fs.back()[0];
                }
                //compute fn
                // normal forces that equalize normal velocity between each
                // vertex and the (vertex-group + face) aggregate
                for (int k = 0; k < vId_list.size(); ++k) {
                    TinyScalar fn = (f0[0] + smv) * ms[k] / (m0 + sm) - fs[k][0];
                    // Eigen::Matrix<TinyScalar, 3, 1>
                    rs[k].fill(0);
                    rs[k][0] += fn;
                    r0[0] -= fn;
                }
                //assume all stick from the beginning and remove invalid ones
                std::vector<bool> stick;
                for (int k = 0; k < vId_list.size(); ++k)
                    stick.push_back(true);
                // iterate: demote any stuck vertex whose tangential force
                // leaves the friction cone, until the classification is stable
                while (1) {
                    //output r0, rs
                    do_solve_friction(f0, m0, r0, fs, ms, rs, stick, mu);
                    bool changed = false;
                    for (int k = 0; k < vId_list.size(); ++k)
                        // NOTE(review): cone test compares |rT|^2 against
                        // mu*rN^2, not (mu*rN)^2 — possibly a missing square
                        // on mu; TODO confirm against the external-contact
                        // test above, which uses |rT| > mu*rN.
                        if (stick[k] && rs[k][1]*rs[k][1]+rs[k][2]*rs[k][2] > mu*rs[k][0]*rs[k][0]) {
                            changed = true;
                            stick[k] = false;
                        }
                    if (!changed)
                        break;
                    // std::cout << "changed!" << std::endl;
                }
                //transfer back to world frame
                r0 = local_basis * r0;
                Eigen::Matrix<TinyScalar, 3, 1> v0 = (local_basis* f0 + r0) / m0;
                // std::cout << "expected " << v0.transpose() << std::endl;
                // distribute the face reaction to the three face vertices so
                // each ends at the aggregate state v0
                for (int i = 0; i < 3; ++i) {
                    m_self_contact_forces[m_next_index].col(fId[i]) += v0 * getVertexMass(fId[i]) - (BLOCK(forces, fId[i]) + BLOCK(m_contact_forces[m_next_index], fId[i]));
                }
                for (int k = 0; k < vId_list.size(); ++k) {
                    rs[k] = local_basis * rs[k];
                    m_self_contact_forces[m_next_index].col(vId_list[k]) += rs[k];
                }
            } // scId
            // #pragma omp parallel for
            // for (size_t i = 0u; i < collisions_level_ids.size(); ++i)
            // {
            //     const size_t scId = collisions_level_ids[i];
            //     const Eigen::Matrix<TinyScalar, 3, 1> prev_force = mCollisionDetector->m_remember_self_contact_forces.col(scId);
            //     const SelfForceToAdd<TinyScalar, TinyConstants>& new_force = forces_to_add[i];
            //     for (size_t cmp = 0u; cmp < 3u; ++cmp)
            //     {
            //         // Remove from current
            //         // #pragma omp atomic update
            //         LVAL(m_self_contact_forces[m_current_index], new_force.id_plus, cmp) -=
            //             prev_force[cmp];
            //         // #pragma omp atomic update
            //         for (int k = 0; k < 3; ++k)
            //             LVAL(m_self_contact_forces[m_current_index], new_force.id_minus[k], cmp) +=
            //                 prev_force[cmp] * new_force.alpha[k];
            //         // LVAL(m_self_contact_forces[m_current_index], new_force.id_minus, cmp) +=
            //         //     prev_force[cmp];
            //         // #pragma omp atomic update
            //         LVAL(m_self_contact_forces[m_next_index], new_force.id_plus, cmp) +=
            //             new_force.force[cmp];
            //         // #pragma omp atomic update
            //         for (int k = 0; k < 3; ++k)
            //             LVAL(m_self_contact_forces[m_next_index], new_force.id_minus[k], cmp) -=
            //                 new_force.force[cmp] * new_force.alpha[k];
            //         // LVAL(m_self_contact_forces[m_next_index], new_force.id_minus, cmp) -=
            //         //     new_force.force[cmp];
            //         // #pragma omp atomic write
            //         mCollisionDetector->m_remember_self_contact_forces(cmp, scId) = new_force.force[cmp];
            //     } // cmp
            // } // scId
        } // level
        // Add all the forces to the RHS
        // std::cout << "???" << m_self_contact_forces[m_next_index].col(86).transpose()<<std::endl;
        m_rhs += m_contact_forces[m_next_index] + m_self_contact_forces[m_next_index]
            //+ m_self_contact_repercusion_forces[m_next_index]
        ;
        const TinyScalar duration_self_friction = TIMER_DURATION(self_friction, microseconds);
        m_self_friction_times.push_back(duration_self_friction);
#ifdef TIMER_PRINT
        std::cout << "# Rhs self friction : " << duration_self_friction << " µs" << std::endl;
#endif // TIMER_PRINT
    }
    // Move on next step
    // Double-buffered force storage: swap current/next and clear the new
    // "next" buffers for the coming step.
    if (m_handle_self_collision)
    {
        m_current_index = m_next_index;
        m_next_index = (m_next_index) ? 0u : 1u;
        m_contact_forces[m_next_index].setZero();
        m_self_contact_forces[m_next_index].setZero();
        // m_self_contact_repercusion_forces[m_next_index].setZero();
        m_alpha[m_next_index].setZero();
    }
    return ans;
}
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
ComputeSolution(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &deltar,
                const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &ori_c,
                Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &hvf,
                Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &s)
{
    // Solve the split (non-rigid / rigid) system via the Schur complement
    // precomputed in PreComputation():
    //   hvf : non-rigid solution, s : reduced rigid-body solution.
    // NOTE(review): parameter `deltar` is not used in this body.
    // TIMER_START(total)
    //tildaV
    int curr = 0, curc = 0;   // (unused)
    // tildaV maps the reduced rigid coordinates to the rigid dofs; assembled
    // from the kinematic tree roots.
    Eigen::SparseMatrix<TinyScalar> tildaV(mrigidIdx.size(), mDof);
    std::vector<Eigen::Triplet<TinyScalar>> trips;
    for (int i = 0; i < mrigidBodies.size(); ++i)
        if (mlinks[i]->parent == NULL)
            mlinks[i]->ComputeTildaV(trips, V0, mInitX);
    tildaV.setFromTriplets(trips.begin(), trips.end());
    Qc = tildaV.transpose() * Qc_L;
    // Split the permuted right-hand side into non-rigid (tmp1) and rigid
    // (tmp2) parts, in the same ordering PreComputation() set up.
    Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> tmp1(mnonrigidIdx.size());
    Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> tmp2(mrigidIdx.size());
    auto tmp = mperm * ori_c;
    tmp1 = tmp.template segment(0, mnonrigidIdx.size());
    tmp2 = tmp.template segment(mnonrigidIdx.size(), mrigidIdx.size());
    // std::cout << "tmp2 size " << tmp2.size() << "\n";
    // residual right-hand sides for the two blocks
    mcf = tmp1 - Qc_L.transpose() * V0 - Qf*mNonrigidX;
    mcs = tildaV.transpose() * (tmp2 - Qb_M * V0 - Qc_L * mNonrigidX);
    // std::cout << mJointTorque[0] << " "
    //     << mJointTorque[1] << " "
    //     << mJointTorque[2] << " mJointTorque1\n";
    // std::cout << "mJointTorque size " << mJointTorque.size() << "\n";
    // std::cout << mcs[6] << " "
    //     << mcs[7] << " "
    //     << mcs[8] << " mcs\n";
    // joint torques act on the reduced coordinates after the 6 root dofs
    for (int i = 0; i < mJointTorque.size(); ++i) {
        mcs[6+i] += mJointTorque[i];
    }
    // std::cout << mcs[6] << " "
    //     << mcs[7] << " "
    //     << mcs[8] << " mcs2\n";
    // reduced Schur-complement system matrix for the rigid part
    Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> S = tildaV.transpose() * QbMmQcLQfm1QcLT * tildaV;
    // Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> Sinv = S.inverse();
    // TIMER_START(hvf)
    if (mrigidIdx.size() > 0) {
        // back-substitution: solve for s first, then for the non-rigid hvf
        Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> solve_result = mDynamicSolver.solve(-mcf);
        // NOTE(review): this `tmp` shadows the outer `auto tmp` above.
        Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> tmp = mcs + Qc * solve_result;
        s = S.fullPivLu().solve(tmp);
        hvf = mDynamicSolver.solve(mcf - Qc.transpose() * s);
    } else {
        hvf = mDynamicSolver.solve(mcf);
        // NOTE(review): the solve of the zero vector below is dead code —
        // `ttt` is never read afterwards.
        Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> ttt = mcf;
        ttt.setZero();
        ttt = mDynamicSolver.solve(ttt);
    }
    // propagate the new rigid transforms down each kinematic tree
    for (int i = 0; i < mrigidBodies.size(); ++i)
        mlinks[i]->UpdateT(s);
}
template <typename TinyScalar, typename TinyConstants>
Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> World<TinyScalar, TinyConstants>::
ComputeOriX(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> &x_n1)
{
    // Scatter the reduced solution back into full per-vertex coordinates:
    // non-rigid dofs come from x_n1, rigid dofs from the cached V0.
    Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> full(3*mNumVertices);
    const int nFree = mnonrigidIdx.size();
    for (int k = 0; k < nFree; ++k)
        full[mnonrigidIdx[k]] = x_n1[k];
    const int nRigid = mrigidIdx.size();
    for (int k = 0; k < nRigid; ++k)
        full[mrigidIdx[k]] = V0[k];
    return full;
}
template <typename TinyScalar, typename TinyConstants>
void World<TinyScalar, TinyConstants>::
ComputeV0NTr()
{
    // Recompute V0, the current positions of the rigid dofs, by walking each
    // kinematic tree from its root (per-link ComputeV0NTr fills V0 from the
    // rest positions mInitX and the links' current transforms).
    V0.resize(mrigidIdx.size());
    // // std::cout << T.size() << std::endl;
    // for (int i = 0; i < T.size(); ++i) {
    //     Eigen::JacobiSVD<Eigen::Matrix<TinyScalar, 3, 3>> svd(T[i].template block<3,3>(0,0), Eigen::ComputeFullU | Eigen::ComputeFullV);
    //     Tr[i].template block<3,3>(0,0) = svd.matrixU()*svd.matrixV().transpose();
    //     Tr[i].template block<3,1>(0,3) = T[i].template block<3,1>(0,3);
    //     // std::cout << Tr[i] << std::endl;
    // }
    // for (int i = 0, j = 0; i < mrigidBodies.size(); ++i) {
    //     auto body = mrigidBodies[i];
    //     Eigen::Matrix34d &cur_Tr = Tr[i];
    //     for (int k = 0; k < body.size(); ++k) {
    //         Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> V = mInitX.segment<3>(body[k]*3), vh(4);
    //         vh<<V,1;
    //         Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1> v0 = cur_Tr * vh;
    //         V0.segment<3>(j) = v0;
    //         j += 3;
    //     }
    // }
    for (int i = 0; i < mrigidBodies.size(); ++i)
        if (mlinks[i]->parent == NULL)
            mlinks[i]->ComputeV0NTr(V0, mInitX);
}
#undef EPS
};
#endif |
GB_unop__identity_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: (none)
// op(A') function: GB_unop_tran__identity_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose the int8 matrix A and apply the identity
// unary op. This wrapper only handles GB_DISABLE; the actual kernel is the
// included GB_unop_transpose.c template, specialized by the macros above.
GrB_Info GB_unop_tran__identity_int8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__fmod_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__fmod_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__fmod_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__fmod_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__fmod_fp32)
// C=scalar+B GB (_bind1st__fmod_fp32)
// C=scalar+B' GB (_bind1st_tran__fmod_fp32)
// C=A+scalar GB (_bind2nd__fmod_fp32)
// C=A'+scalar GB (_bind2nd_tran__fmod_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = fmodf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmodf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: cij = fmodf (aij, bij).
// Kernel body is the included GB_dense_ewise3_noaccum_template.c.
GrB_Info GB (_Cdense_ewise3_noaccum__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with the
// fmodf operator. Kernel body: GB_dense_subassign_23_template.c.
GrB_Info GB (_Cdense_accumB__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a float scalar b into the dense matrix C with the
// fmodf operator. Kernel body: GB_dense_subassign_22_template.c.
GrB_Info GB (_Cdense_accumb__fmod_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; artifact of the code generator, kept intentionally
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, where cij = fmodf(aij,bij)
// when both entries are present. Kernel body: GB_add_template.c.
GrB_Info GB (_AaddB__fmod_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces for M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with cij = fmodf(aij,bij).
// Kernel body: GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__fmod_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// fmod is not commutative and has no flipped variant, so GB_BINOP_FLIP
// selects between fmult(x,y) and fmult(y,x) at compile time.
GrB_Info GB (_AemultB_02__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B when M is sparse/hyper and both A and B are
// bitmap/full. Kernel body: GB_emult_03_template.c.
GrB_Info GB (_AemultB_03__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
// Kernel body: GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__fmod_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = fmodf (x, Bx [p]) for every entry present in B, with the scalar
// x bound as the first operand.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__fmod_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    const float x = (*((const float *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        // process only entries present in the bitmap Bb
        if (GBB (Bb, p))
        {
            Cx [p] = fmodf (x, GBX (Bx, p, false)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = fmodf (Ax [p], y) for every entry present in A, with the scalar
// y bound as the second operand.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__fmod_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    const float y = (*((const float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // process only entries present in the bitmap Ab
        if (GBB (Ab, p))
        {
            Cx [p] = fmodf (GBX (Ax, p, false), y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Macro consumed by GB_unop_transpose.c: cij = fmodf (x, aij), with the
// scalar x bound first (no typecasting, in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmodf (x, aij) ; \
}
// C = fmod (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__fmod_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: re-establish GB_ATYPE for code that follows
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Macro consumed by GB_unop_transpose.c: cij = fmodf (aij, y), with the
// scalar y bound second (no typecasting, in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmodf (aij, y) ; \
}
// C = fmod (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ApproxWeightPerfectMatching.h | //
// ApproxWeightPerfectMatching.h
//
//
// Created by Ariful Azad on 8/22/17.
//
//
#ifndef ApproxWeightPerfectMatching_h
#define ApproxWeightPerfectMatching_h
#include "CombBLAS/CombBLAS.h"
#include "BPMaximalMatching.h"
#include "BPMaximumMatching.h"
#include <parallel/algorithm>
#include <parallel/numeric>
#include <memory>
#include <limits>
using namespace std;
namespace combblas {
// Bundle of process-grid and block-distribution parameters used by the
// AWPM (approximate-weight perfect matching) phases, captured once in
// TwoThirdApprox so they need not be recomputed from the matrix each call.
template <class IT>
struct AWPM_param
{
int nprocs;        // total number of processes in the grid
int myrank;        // this process's rank in the world communicator
int pr;            // number of process rows in the grid
int pc;            // number of process columns in the grid
IT lncol;          // number of local columns of the submatrix
IT lnrow;          // number of local rows of the submatrix
IT localRowStart;  // first global row owned by this process (colrank * m_perproc)
IT localColStart;  // first global column owned by this process (rowrank * n_perproc)
IT m_perproc;      // global rows per process row (nrows / pr)
IT n_perproc;      // global cols per process column (ncols / pc)
std::shared_ptr<CommGrid> commGrid;  // the 2D process grid
};
// All-to-all exchange of (row, col, value) triples.
// tempTuples[p] holds the triples destined for processor p; each process
// returns the concatenation of everything sent to it.  tempTuples is
// consumed: each per-destination vector is freed as soon as it is packed.
template <class IT, class NT>
std::vector<std::tuple<IT,IT,NT>> ExchangeData(std::vector<std::vector<std::tuple<IT,IT,NT>>> & tempTuples, MPI_Comm World)
{
    // Raw-byte MPI type covering one tuple; the bytes are never
    // reinterpreted by MPI, so MPI_CHAR suffices.
    MPI_Datatype MPI_tuple;
    MPI_Type_contiguous(sizeof(std::tuple<IT,IT,NT>), MPI_CHAR, &MPI_tuple);
    MPI_Type_commit(&MPI_tuple);
    int nprocs;
    MPI_Comm_size(World, &nprocs);
    int * sendcnt = new int[nprocs];
    int * recvcnt = new int[nprocs];
    int * sdispls = new int[nprocs]();
    int * rdispls = new int[nprocs]();
    // Count outgoing tuples per destination.  (Loop index is int, matching
    // nprocs; the original used IT here, a signed/size mismatch when IT is
    // unsigned or wider than int, and inconsistent with the packing loop.)
    IT totsend = 0;
    for(int i=0; i<nprocs; ++i)
    {
        sendcnt[i] = tempTuples[i].size();
        totsend += tempTuples[i].size();
    }
    // Exchange counts; exclusive prefix sums give the displacements.
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
    std::partial_sum(sendcnt, sendcnt+nprocs-1, sdispls+1);
    std::partial_sum(recvcnt, recvcnt+nprocs-1, rdispls+1);
    IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs, static_cast<IT>(0));
    // Pack outgoing triples contiguously, releasing each source vector.
    std::vector< std::tuple<IT,IT,NT> > sendTuples(totsend);
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(tempTuples[i].begin(), tempTuples[i].end(), sendTuples.data()+sdispls[i]);
        std::vector< std::tuple<IT,IT,NT> >().swap(tempTuples[i]); // clear memory
    }
    std::vector< std::tuple<IT,IT,NT> > recvTuples(totrecv);
    MPI_Alltoallv(sendTuples.data(), sendcnt, sdispls, MPI_tuple, recvTuples.data(), recvcnt, rdispls, MPI_tuple, World);
    DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // free all memory
    MPI_Type_free(&MPI_tuple);
    return recvTuples;
}
// All-to-all exchange of 4-tuples (the (IT,IT,IT,NT) analogue of
// ExchangeData).  tempTuples[p] holds the tuples destined for processor p
// and is consumed as it is packed.
template <class IT, class NT>
std::vector<std::tuple<IT,IT,IT,NT>> ExchangeData1(std::vector<std::vector<std::tuple<IT,IT,IT,NT>>> & tempTuples, MPI_Comm World)
{
    // Raw-byte MPI type covering one tuple (never reinterpreted by MPI).
    MPI_Datatype MPI_tuple;
    MPI_Type_contiguous(sizeof(std::tuple<IT,IT,IT,NT>), MPI_CHAR, &MPI_tuple);
    MPI_Type_commit(&MPI_tuple);
    int nprocs;
    MPI_Comm_size(World, &nprocs);
    int * sendcnt = new int[nprocs];
    int * recvcnt = new int[nprocs];
    int * sdispls = new int[nprocs]();
    int * rdispls = new int[nprocs]();
    // Count outgoing tuples per destination.  (Loop index is int, matching
    // nprocs; the original used IT, a signed/size mismatch when IT is
    // unsigned or wider than int.)
    IT totsend = 0;
    for(int i=0; i<nprocs; ++i)
    {
        sendcnt[i] = tempTuples[i].size();
        totsend += tempTuples[i].size();
    }
    // Exchange counts; exclusive prefix sums give the displacements.
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
    std::partial_sum(sendcnt, sendcnt+nprocs-1, sdispls+1);
    std::partial_sum(recvcnt, recvcnt+nprocs-1, rdispls+1);
    IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs, static_cast<IT>(0));
    // Pack outgoing tuples contiguously, releasing each source vector.
    std::vector< std::tuple<IT,IT,IT,NT> > sendTuples(totsend);
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(tempTuples[i].begin(), tempTuples[i].end(), sendTuples.data()+sdispls[i]);
        std::vector< std::tuple<IT,IT,IT,NT> >().swap(tempTuples[i]); // clear memory
    }
    std::vector< std::tuple<IT,IT,IT,NT> > recvTuples(totrecv);
    MPI_Alltoallv(sendTuples.data(), sendcnt, sdispls, MPI_tuple, recvTuples.data(), recvcnt, rdispls, MPI_tuple, World);
    DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // free all memory
    MPI_Type_free(&MPI_tuple);
    return recvTuples;
}
// remember that getnrow() and getncol() require collectives
// Hence, we save them once and pass them to this function
// Rank of the process that owns global entry (grow, gcol) under the block
// distribution used by SpParMat.  nrows/ncols are passed in because
// getnrow()/getncol() are collectives (see comment above).
template <class IT, class NT,class DER>
int OwnerProcs(SpParMat < IT, NT, DER > & A, IT grow, IT gcol, IT nrows, IT ncols)
{
    auto commGrid = A.getcommgrid();
    const int procrows = commGrid->GetGridRows();
    const int proccols = commGrid->GetGridCols();
    const IT m_perproc = nrows / procrows;
    const IT n_perproc = ncols / proccols;
    // A zero quotient means the dimension is smaller than the grid: all
    // such entries are owned by the last process row (resp. column).
    const int pr = (m_perproc != 0)
        ? std::min(static_cast<int>(grow / m_perproc), procrows-1)
        : procrows - 1;
    const int pc = (n_perproc != 0)
        ? std::min(static_cast<int>(gcol / n_perproc), proccols-1)
        : proccols - 1;
    return commGrid->GetRank(pr, pc);
}
/*
// Hence, we save them once and pass them to this function
template <class IT, class NT,class DER>
int OwnerProcs(SpParMat < IT, NT, DER > & A, IT grow, IT gcol, IT nrows, IT ncols)
{
int pr1, pc1;
if(m_perproc != 0)
pr1 = std::min(static_cast<int>(grow / m_perproc), pr-1);
else // all owned by the last processor row
pr1 = pr -1;
if(n_perproc != 0)
pc1 = std::min(static_cast<int>(gcol / n_perproc), pc-1);
else
pc1 = pc-1;
return commGrid->GetRank(pr1, pc1);
}
*/
// Allgather-based broadcast of mate pairs: every process contributes its
// local tuples and receives the concatenation from all processes in World.
template <class IT>
std::vector<std::tuple<IT,IT>> MateBcast(std::vector<std::tuple<IT,IT>> sendTuples, MPI_Comm World)
{
/* Create/allocate variables for vector assignment */
// raw-byte MPI type: one element per tuple, never reinterpreted by MPI
MPI_Datatype MPI_tuple;
MPI_Type_contiguous(sizeof(std::tuple<IT,IT>) , MPI_CHAR, &MPI_tuple);
MPI_Type_commit(&MPI_tuple);
int nprocs;
MPI_Comm_size(World, &nprocs);
int * recvcnt = new int[nprocs];
int * rdispls = new int[nprocs]();
int sendcnt = sendTuples.size();
// gather each process's count, then exclusive prefix sum -> displacements
MPI_Allgather(&sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
std::partial_sum(recvcnt, recvcnt+nprocs-1, rdispls+1);
IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs, static_cast<IT>(0));
std::vector< std::tuple<IT,IT> > recvTuples(totrecv);
MPI_Allgatherv(sendTuples.data(), sendcnt, MPI_tuple,
recvTuples.data(), recvcnt, rdispls,MPI_tuple,World );
DeleteAll(recvcnt, rdispls); // free all memory
MPI_Type_free(&MPI_tuple);
return recvTuples;
}
// -----------------------------------------------------------
// replicate weights of mates
// Can be improved by removing AllReduce by All2All
// -----------------------------------------------------------
// Fill the replicated weight vectors with the weight of each matched edge.
// For each local column lj matched to global row mj stored on this process,
// scan the column for the entry at row mj and record its value in both
// replicated vectors; the all-reduces then sum the partial (mostly zero)
// contributions across the process column/row so every process ends up
// with the full vectors.
template <class IT, class NT>
void ReplicateMateWeights( const AWPM_param<IT>& param, Dcsc<IT, NT>*dcsc, const std::vector<IT>& colptr, std::vector<IT>& RepMateC2R, std::vector<NT>& RepMateWR2C, std::vector<NT>& RepMateWC2R)
{
    std::fill(RepMateWC2R.begin(), RepMateWC2R.end(), static_cast<NT>(0));
    std::fill(RepMateWR2C.begin(), RepMateWR2C.end(), static_cast<NT>(0));
#ifdef THREADED
#pragma omp parallel for
#endif
    // Loop index is IT, not int: param.lncol is an IT (64-bit in practice),
    // and the original 'int k' truncates/overflows for very large matrices.
    for(IT k=0; k<param.lncol; ++k)
    {
        IT lj = k; // local column index
        IT mj = RepMateC2R[lj]; // global row matched to this column
        // only the process row owning global row mj contributes a weight
        if(mj >= param.localRowStart && mj < (param.localRowStart+param.lnrow) )
        {
            for(IT cp = colptr[k]; cp < colptr[k+1]; ++cp)
            {
                IT li = dcsc->ir[cp];
                IT i = li + param.localRowStart;
                // TODO: use binary search to directly go to mj-th entry if more than 32 nonzero in this column
                if( i == mj)
                {
                    RepMateWC2R[lj] = dcsc->numx[cp];
                    RepMateWR2C[mj-param.localRowStart] = dcsc->numx[cp];
                    //break;
                }
            }
        }
    }
    // combine partial results so the replicated vectors are complete everywhere
    MPI_Comm ColWorld = param.commGrid->GetColWorld();
    MPI_Comm RowWorld = param.commGrid->GetRowWorld();
    MPI_Allreduce(MPI_IN_PLACE, RepMateWC2R.data(), RepMateWC2R.size(), MPIType<NT>(), MPI_SUM, ColWorld);
    MPI_Allreduce(MPI_IN_PLACE, RepMateWR2C.data(), RepMateWR2C.size(), MPIType<NT>(), MPI_SUM, RowWorld);
}
// Global trace (sum of diagonal entries) of the distributed matrix A.
// Also reports, via rettrnnz, how many diagonal entries are present.
// NOTE(review): the default argument 'IT& rettrnnz=0' binds a non-const
// lvalue reference to a literal, which is ill-formed standard C++ if the
// default is ever used -- callers must always pass an argument; consider
// changing the parameter to a pointer or removing the default.
template <class IT, class NT,class DER>
NT Trace( SpParMat < IT, NT, DER > & A, IT& rettrnnz=0)
{
IT nrows = A.getnrow();
IT ncols = A.getncol();
auto commGrid = A.getcommgrid();
MPI_Comm World = commGrid->GetWorld();
int myrank=commGrid->GetRank();
int pr = commGrid->GetGridRows();
int pc = commGrid->GetGridCols();
//Information about the matrix distribution
//Assume that A is an nrow x ncol matrix
//The local submatrix is an lnrow x lncol matrix
int rowrank = commGrid->GetRankInProcRow();
int colrank = commGrid->GetRankInProcCol();
IT m_perproc = nrows / pr;
IT n_perproc = ncols / pc;
DER* spSeq = A.seqptr(); // local submatrix
IT localRowStart = colrank * m_perproc; // first row in this process
IT localColStart = rowrank * n_perproc; // first col in this process
IT trnnz = 0;   // local count of stored diagonal entries
NT trace = 0.0; // local partial trace
for(auto colit = spSeq->begcol(); colit != spSeq->endcol(); ++colit) // iterate over columns
{
IT lj = colit.colid(); // local numbering
IT j = lj + localColStart;
for(auto nzit = spSeq->begnz(colit); nzit < spSeq->endnz(colit); ++nzit)
{
IT li = nzit.rowid();
IT i = li + localRowStart;
if( i == j)
{
trnnz ++;
trace += nzit.value();
}
}
}
// sum the local contributions over all processes
MPI_Allreduce(MPI_IN_PLACE, &trnnz, 1, MPIType<IT>(), MPI_SUM, World);
MPI_Allreduce(MPI_IN_PLACE, &trace, 1, MPIType<NT>(), MPI_SUM, World);
rettrnnz = trnnz;
/*
if(myrank==0)
cout <<"nrows: " << nrows << " Nnz in the diag: " << trnnz << " sum of diag: " << trace << endl;
*/
return trace;
}
// Global sum of the replicated matched-column weights across the process
// row; also returns (via minw) the global minimum single-edge weight.
template <class NT>
NT MatchingWeight( std::vector<NT>& RepMateWC2R, MPI_Comm RowWorld, NT& minw)
{
    NT w = 0;
    // Proper identity for a running minimum.  The original seeded minw with
    // the magic constant 99999999999999.0, which is wrong whenever weights
    // can exceed ~1e14; <limits> is already included by this header.
    minw = std::numeric_limits<NT>::max();
    for(size_t i=0; i<RepMateWC2R.size(); i++)
    {
        w += RepMateWC2R[i];
        minw = std::min(minw, RepMateWC2R[i]);
    }
    // reduce the local sum and minimum across the process row
    MPI_Allreduce(MPI_IN_PLACE, &w, 1, MPIType<NT>(), MPI_SUM, RowWorld);
    MPI_Allreduce(MPI_IN_PLACE, &minw, 1, MPIType<NT>(), MPI_MIN, RowWorld);
    return w;
}
// update the distributed mate vectors from replicated mate vectors
template <class IT>
void UpdateMatching(FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row, std::vector<IT>& RepMateR2C, std::vector<IT>& RepMateC2R)
{
auto commGrid = mateRow2Col.getcommgrid();
MPI_Comm RowWorld = commGrid->GetRowWorld();
int rowroot = commGrid->GetDiagOfProcRow();
int pc = commGrid->GetGridCols();
// mateRow2Col is easy
// every process already holds the replicated R2C vector, so just copy the
// locally-owned slice back into the distributed vector
IT localLenR2C = mateRow2Col.LocArrSize();
//IT* localR2C = mateRow2Col.GetLocArr();
for(IT i=0, j = mateRow2Col.RowLenUntil(); i<localLenR2C; i++, j++)
{
mateRow2Col.SetLocalElement(i, RepMateR2C[j]);
//localR2C[i] = RepMateR2C[j];
}
// mateCol2Row requires communication
// build per-destination counts/displacements, then scatter the replicated
// C2R vector from the diagonal process of each process row to the owners
std::vector <int> sendcnts(pc);
std::vector <int> dpls(pc);
dpls[0] = 0;
for(int i=1; i<pc; i++)
{
dpls[i] = mateCol2Row.RowLenUntil(i);
sendcnts[i-1] = dpls[i] - dpls[i-1];
}
sendcnts[pc-1] = RepMateC2R.size() - dpls[pc-1]; // remainder goes to the last column
IT localLenC2R = mateCol2Row.LocArrSize();
// NOTE(review): the cast presumably discards a const qualifier returned by
// GetLocArr() so MPI can write in place -- confirm this is intended.
IT* localC2R = (IT*) mateCol2Row.GetLocArr();
MPI_Scatterv(RepMateC2R.data(),sendcnts.data(), dpls.data(), MPIType<IT>(), localC2R, localLenC2R, MPIType<IT>(),rowroot, RowWorld);
}
// Per-thread buffer length (in tuples) for the binning loops: start from
// 256 entries per bin and halve until the whole per-thread buffer
// (len * nbins * itemsize bytes) fits in the L2 budget, then never exceed
// nbins+1.
int ThreadBuffLenForBinning(int itemsize, int nbins)
{
    // 1MB shared cache (per 2 cores) in KNL
#ifndef L2_CACHE_SIZE
#define L2_CACHE_SIZE 256000
#endif
    int len = 256;
    while (len * nbins * itemsize > L2_CACHE_SIZE)
    {
        len /= 2;
    }
    return std::min(nbins + 1, len);
}
// Phase 1 of the 2/3-approximation: for every local nonzero (i,j) below the
// matched row mj of its column, compute the gain w of the proposed swap and
// send the triple (mi, mj, w) to the process owning row mj / column mi.
// Two passes over the matrix: one to size the send buffers, one to pack
// them via per-thread staging buffers flushed with atomic fetch-and-add.
template <class IT, class NT>
std::vector< std::tuple<IT,IT,NT> > Phase1(const AWPM_param<IT>& param, Dcsc<IT, NT>* dcsc, const std::vector<IT>& colptr, const std::vector<IT>& RepMateR2C, const std::vector<IT>& RepMateC2R, const std::vector<NT>& RepMateWR2C, const std::vector<NT>& RepMateWC2R )
{
double tstart = MPI_Wtime();
MPI_Comm World = param.commGrid->GetWorld();
//Step 1: Count the amount of data to be sent to different processors
std::vector<int> sendcnt(param.nprocs,0); // number items to be sent to each processor
#ifdef THREADED
#pragma omp parallel
#endif
{
// per-thread counts, merged into sendcnt with atomic adds below
std::vector<int> tsendcnt(param.nprocs,0);
#ifdef THREADED
#pragma omp for
#endif
for(int k=0; k<param.lncol; ++k)
{
IT mj = RepMateC2R[k]; // lj = k
IT j = k + param.localColStart;
for(IT cp = colptr[k]; cp < colptr[k+1]; ++cp)
{
IT li = dcsc->ir[cp];
IT i = li + param.localRowStart;
IT mi = RepMateR2C[li];
if( i > mj) // TODO : stop when first come to this, may be use <
{
// destination: owner of (mj, mi) under the block distribution
int rrank = param.m_perproc != 0 ? std::min(static_cast<int>(mj / param.m_perproc), param.pr-1) : (param.pr-1);
int crank = param.n_perproc != 0 ? std::min(static_cast<int>(mi / param.n_perproc), param.pc-1) : (param.pc-1);
int owner = param.commGrid->GetRank(rrank , crank);
tsendcnt[owner]++;
}
}
}
for(int i=0; i<param.nprocs; i++)
{
__sync_fetch_and_add(sendcnt.data()+i, tsendcnt[i]);
}
}
IT totsend = std::accumulate(sendcnt.data(), sendcnt.data()+param.nprocs, static_cast<IT>(0));
std::vector<int> sdispls (param.nprocs, 0);
std::partial_sum(sendcnt.data(), sendcnt.data()+param.nprocs-1, sdispls.data()+1);
std::vector< std::tuple<IT,IT,NT> > sendTuples(totsend);
// transferCount[p]: slots of sendTuples[p-region] already claimed (atomically)
std::vector<int> transferCount(param.nprocs,0);
int THREAD_BUF_LEN = ThreadBuffLenForBinning(24, param.nprocs);
//Step 2: Compile data to be sent to different processors
#ifdef THREADED
#pragma omp parallel
#endif
{
std::vector<int> tsendcnt(param.nprocs,0);
// per-thread staging area: THREAD_BUF_LEN tuples per destination
std::vector<std::tuple<IT,IT, NT>> tsendTuples (param.nprocs*THREAD_BUF_LEN);
#ifdef THREADED
#pragma omp for
#endif
for(int k=0; k<param.lncol; ++k)
{
IT mj = RepMateC2R[k];
IT lj = k;
IT j = k + param.localColStart;
for(IT cp = colptr[k]; cp < colptr[k+1]; ++cp)
{
IT li = dcsc->ir[cp];
IT i = li + param.localRowStart;
IT mi = RepMateR2C[li];
if( i > mj) // TODO : stop when first come to this, may be use <
{
// gain of the proposed swap: a(i,j) - w(mj->j) - w(j->mj)
double w = dcsc->numx[cp]- RepMateWR2C[li] - RepMateWC2R[lj];
int rrank = param.m_perproc != 0 ? std::min(static_cast<int>(mj / param.m_perproc), param.pr-1) : (param.pr-1);
int crank = param.n_perproc != 0 ? std::min(static_cast<int>(mi / param.n_perproc), param.pc-1) : (param.pc-1);
int owner = param.commGrid->GetRank(rrank , crank);
if (tsendcnt[owner] < THREAD_BUF_LEN)
{
// stage locally while there is room
tsendTuples[THREAD_BUF_LEN * owner + tsendcnt[owner]] = std::make_tuple(mi, mj, w);
tsendcnt[owner]++;
}
else
{
// staging buffer full: claim a slot range atomically and flush
int tt = __sync_fetch_and_add(transferCount.data()+owner, THREAD_BUF_LEN);
copy( tsendTuples.data()+THREAD_BUF_LEN * owner, tsendTuples.data()+THREAD_BUF_LEN * (owner+1) , sendTuples.data() + sdispls[owner]+ tt);
tsendTuples[THREAD_BUF_LEN * owner] = std::make_tuple(mi, mj, w);
tsendcnt[owner] = 1;
}
}
}
}
// flush the remaining partially-filled staging buffers
for(int owner=0; owner < param.nprocs; owner++)
{
if (tsendcnt[owner] >0)
{
int tt = __sync_fetch_and_add(transferCount.data()+owner, tsendcnt[owner]);
copy( tsendTuples.data()+THREAD_BUF_LEN * owner, tsendTuples.data()+THREAD_BUF_LEN * owner + tsendcnt[owner], sendTuples.data() + sdispls[owner]+ tt);
}
}
}
double t1Comp = MPI_Wtime() - tstart; // computation time (profiling only; currently unused)
tstart = MPI_Wtime();
// Step 3: Communicate data
std::vector<int> recvcnt (param.nprocs);
std::vector<int> rdispls (param.nprocs, 0);
MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
std::partial_sum(recvcnt.data(), recvcnt.data()+param.nprocs-1, rdispls.data()+1);
IT totrecv = std::accumulate(recvcnt.data(), recvcnt.data()+param.nprocs, static_cast<IT>(0));
MPI_Datatype MPI_tuple;
MPI_Type_contiguous(sizeof(std::tuple<IT,IT,NT>), MPI_CHAR, &MPI_tuple);
MPI_Type_commit(&MPI_tuple);
std::vector< std::tuple<IT,IT,NT> > recvTuples1(totrecv);
MPI_Alltoallv(sendTuples.data(), sendcnt.data(), sdispls.data(), MPI_tuple, recvTuples1.data(), recvcnt.data(), rdispls.data(), MPI_tuple, World);
MPI_Type_free(&MPI_tuple);
double t1Comm = MPI_Wtime() - tstart; // communication time (profiling only; currently unused)
return recvTuples1;
}
// Phase 2 of the 2/3-approximation: receive the Phase-1 proposals, and for
// each (mi, mj, w) check whether the closing edge (mj, mi) exists locally;
// if the total cycle weight cw is positive, forward (mj, mi, i, cw) to the
// owner of (mj, j).  Sorting the proposals lets a merge-style linear scan
// replace per-proposal binary search (see comment below).
template <class IT, class NT>
std::vector< std::tuple<IT,IT,IT,NT> > Phase2(const AWPM_param<IT>& param, std::vector<std::tuple<IT,IT,NT>>& recvTuples, Dcsc<IT, NT>* dcsc, const std::vector<IT>& colptr, const std::vector<IT>& RepMateR2C, const std::vector<IT>& RepMateC2R, const std::vector<NT>& RepMateWR2C, const std::vector<NT>& RepMateWC2R )
{
MPI_Comm World = param.commGrid->GetWorld();
double tstart = MPI_Wtime();
// Step 1: Sort for effecient searching of indices
__gnu_parallel::sort(recvTuples.begin(), recvTuples.end());
// NOTE(review): tempTuples1 is never used in this function (leftover from
// Phase2_old) -- candidate for removal.
std::vector<std::vector<std::tuple<IT,IT, IT, NT>>> tempTuples1 (param.nprocs);
std::vector<int> sendcnt(param.nprocs,0); // number items to be sent to each processor
//Step 2: Count the amount of data to be sent to different processors
// Instead of binary search in each column, I am doing linear search
// Linear search is faster here because, we need to search 40%-50% of nnz
int nBins = 1;
#ifdef THREADED
#pragma omp parallel
{
nBins = omp_get_num_threads() * 4;
}
#endif
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<nBins; i++)
{
// each bin processes a contiguous slice of the sorted proposals
int perBin = recvTuples.size()/nBins;
int startBinIndex = perBin * i;
int endBinIndex = perBin * (i+1);
if(i==nBins-1) endBinIndex = recvTuples.size();
std::vector<int> tsendcnt(param.nprocs,0);
for(int k=startBinIndex; k<endBinIndex;)
{
IT mi = std::get<0>(recvTuples[k]);
IT lcol = mi - param.localColStart;
IT i = RepMateC2R[lcol];
// merge-scan: idx1 walks proposals with this mi, idx2 walks column lcol
IT idx1 = k;
IT idx2 = colptr[lcol];
for(; std::get<0>(recvTuples[idx1]) == mi && idx2 < colptr[lcol+1];) //**
{
IT mj = std::get<1>(recvTuples[idx1]) ;
IT lrow = mj - param.localRowStart;
IT j = RepMateR2C[lrow];
IT lrowMat = dcsc->ir[idx2];
if(lrowMat == lrow)
{
NT weight = std::get<2>(recvTuples[idx1]);
NT cw = weight + RepMateWR2C[lrow]; //w+W[M'[j],M[i]];
if (cw > 0)
{
int rrank = (param.m_perproc != 0) ? std::min(static_cast<int>(mj / param.m_perproc), param.pr-1) : (param.pr-1);
int crank = (param.n_perproc != 0) ? std::min(static_cast<int>(j / param.n_perproc), param.pc-1) : (param.pc-1);
int owner = param.commGrid->GetRank(rrank , crank);
tsendcnt[owner]++;
}
idx1++; idx2++;
}
else if(lrowMat > lrow)
idx1 ++;
else
idx2 ++;
}
// NOTE(review): this skip loop has no bound on idx1; it relies on a
// tuple with a different mi terminating the scan.  Verify idx1 cannot
// run past recvTuples.size() when mi is the largest key in the buffer.
for(;std::get<0>(recvTuples[idx1]) == mi ; idx1++);
k = idx1;
}
for(int i=0; i<param.nprocs; i++)
{
__sync_fetch_and_add(sendcnt.data()+i, tsendcnt[i]);
}
}
IT totsend = std::accumulate(sendcnt.data(), sendcnt.data()+param.nprocs, static_cast<IT>(0));
std::vector<int> sdispls (param.nprocs, 0);
std::partial_sum(sendcnt.data(), sendcnt.data()+param.nprocs-1, sdispls.data()+1);
std::vector< std::tuple<IT,IT,IT,NT> > sendTuples(totsend);
std::vector<int> transferCount(param.nprocs,0);
int THREAD_BUF_LEN = ThreadBuffLenForBinning(32, param.nprocs);
//Step 3: Compile data to be sent to different processors
// same scan as Step 2, now packing tuples through per-thread staging
// buffers flushed into sendTuples with atomic fetch-and-add
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<nBins; i++)
{
int perBin = recvTuples.size()/nBins;
int startBinIndex = perBin * i;
int endBinIndex = perBin * (i+1);
if(i==nBins-1) endBinIndex = recvTuples.size();
std::vector<int> tsendcnt(param.nprocs,0);
std::vector<std::tuple<IT,IT, IT, NT>> tsendTuples (param.nprocs*THREAD_BUF_LEN);
for(int k=startBinIndex; k<endBinIndex;)
{
IT mi = std::get<0>(recvTuples[k]);
IT lcol = mi - param.localColStart;
IT i = RepMateC2R[lcol];
IT idx1 = k;
IT idx2 = colptr[lcol];
for(; std::get<0>(recvTuples[idx1]) == mi && idx2 < colptr[lcol+1];) //**
{
IT mj = std::get<1>(recvTuples[idx1]) ;
IT lrow = mj - param.localRowStart;
IT j = RepMateR2C[lrow];
IT lrowMat = dcsc->ir[idx2];
if(lrowMat == lrow)
{
NT weight = std::get<2>(recvTuples[idx1]);
NT cw = weight + RepMateWR2C[lrow]; //w+W[M'[j],M[i]];
if (cw > 0)
{
int rrank = (param.m_perproc != 0) ? std::min(static_cast<int>(mj / param.m_perproc), param.pr-1) : (param.pr-1);
int crank = (param.n_perproc != 0) ? std::min(static_cast<int>(j / param.n_perproc), param.pc-1) : (param.pc-1);
int owner = param.commGrid->GetRank(rrank , crank);
if (tsendcnt[owner] < THREAD_BUF_LEN)
{
tsendTuples[THREAD_BUF_LEN * owner + tsendcnt[owner]] = std::make_tuple(mj, mi, i, cw);
tsendcnt[owner]++;
}
else
{
int tt = __sync_fetch_and_add(transferCount.data()+owner, THREAD_BUF_LEN);
std::copy( tsendTuples.data()+THREAD_BUF_LEN * owner, tsendTuples.data()+THREAD_BUF_LEN * (owner+1) , sendTuples.data() + sdispls[owner]+ tt);
tsendTuples[THREAD_BUF_LEN * owner] = std::make_tuple(mj, mi, i, cw);
tsendcnt[owner] = 1;
}
}
idx1++; idx2++;
}
else if(lrowMat > lrow)
idx1 ++;
else
idx2 ++;
}
// NOTE(review): unbounded skip on idx1 -- see the note in Step 2.
for(;std::get<0>(recvTuples[idx1]) == mi ; idx1++);
k = idx1;
}
// flush remaining partially-filled staging buffers
for(int owner=0; owner < param.nprocs; owner++)
{
if (tsendcnt[owner] >0)
{
int tt = __sync_fetch_and_add(transferCount.data()+owner, tsendcnt[owner]);
std::copy( tsendTuples.data()+THREAD_BUF_LEN * owner, tsendTuples.data()+THREAD_BUF_LEN * owner + tsendcnt[owner], sendTuples.data() + sdispls[owner]+ tt);
}
}
}
// Step 4: Communicate data
double t2Comp = MPI_Wtime() - tstart; // computation time (profiling only; currently unused)
tstart = MPI_Wtime();
std::vector<int> recvcnt (param.nprocs);
std::vector<int> rdispls (param.nprocs, 0);
MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
std::partial_sum(recvcnt.data(), recvcnt.data()+param.nprocs-1, rdispls.data()+1);
IT totrecv = std::accumulate(recvcnt.data(), recvcnt.data()+param.nprocs, static_cast<IT>(0));
MPI_Datatype MPI_tuple;
MPI_Type_contiguous(sizeof(std::tuple<IT,IT,IT,NT>), MPI_CHAR, &MPI_tuple);
MPI_Type_commit(&MPI_tuple);
std::vector< std::tuple<IT,IT,IT,NT> > recvTuples1(totrecv);
MPI_Alltoallv(sendTuples.data(), sendcnt.data(), sdispls.data(), MPI_tuple, recvTuples1.data(), recvcnt.data(), rdispls.data(), MPI_tuple, World);
MPI_Type_free(&MPI_tuple);
double t2Comm = MPI_Wtime() - tstart; // communication time (profiling only; currently unused)
return recvTuples1;
}
// Old version of Phase 2
// Not multithreaded (uses binary search)
// Sequential predecessor of Phase2: for each received proposal (mi, mj, w),
// binary-search column (mi - localColStart) for row (mj - localRowStart);
// if found and the cycle weight cw is positive, queue (mj, mi, i, cw) for
// the owner of (mj, j).  Returns the per-destination tuple lists.
template <class IT, class NT>
std::vector<std::vector<std::tuple<IT,IT, IT, NT>>> Phase2_old(const AWPM_param<IT>& param, std::vector<std::tuple<IT,IT,NT>>& recvTuples, Dcsc<IT, NT>* dcsc, const std::vector<IT>& colptr, const std::vector<IT>& RepMateR2C, const std::vector<IT>& RepMateC2R, const std::vector<NT>& RepMateWR2C, const std::vector<NT>& RepMateWC2R )
{
std::vector<std::vector<std::tuple<IT,IT, IT, NT>>> tempTuples1 (param.nprocs);
for(int k=0; k<recvTuples.size(); ++k)
{
IT mi = std::get<0>(recvTuples[k]) ;
IT mj = std::get<1>(recvTuples[k]) ;
IT i = RepMateC2R[mi - param.localColStart];
NT weight = std::get<2>(recvTuples[k]);
// skip empty local columns
if(colptr[mi- param.localColStart+1] > colptr[mi- param.localColStart] )
{
IT * ele = find(dcsc->ir+colptr[mi - param.localColStart], dcsc->ir+colptr[mi - param.localColStart+1], mj - param.localRowStart);
// TODO: Add a function that returns the edge weight directly
if (ele != dcsc->ir+colptr[mi - param.localColStart+1])
{
NT cw = weight + RepMateWR2C[mj - param.localRowStart]; //w+W[M'[j],M[i]];
if (cw > 0)
{
IT j = RepMateR2C[mj - param.localRowStart];
// destination: owner of (mj, j) under the block distribution
int rrank = (param.m_perproc != 0) ? std::min(static_cast<int>(mj / param.m_perproc), param.pr-1) : (param.pr-1);
int crank = (param.n_perproc != 0) ? std::min(static_cast<int>(j / param.n_perproc), param.pc-1) : (param.pc-1);
int owner = param.commGrid->GetRank(rrank , crank);
tempTuples1[owner].push_back(make_tuple(mj, mi, i, cw));
}
}
}
}
return tempTuples1;
}
template <class IT, class NT, class DER>
void TwoThirdApprox(SpParMat < IT, NT, DER > & A, FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row)
{
// Information about CommGrid and matrix layout
// Assume that processes are laid in (pr x pc) process grid
auto commGrid = A.getcommgrid();
int myrank=commGrid->GetRank();
MPI_Comm World = commGrid->GetWorld();
MPI_Comm ColWorld = commGrid->GetColWorld();
MPI_Comm RowWorld = commGrid->GetRowWorld();
int nprocs = commGrid->GetSize();
int pr = commGrid->GetGridRows();
int pc = commGrid->GetGridCols();
int rowrank = commGrid->GetRankInProcRow();
int colrank = commGrid->GetRankInProcCol();
int diagneigh = commGrid->GetComplementRank();
//Information about the matrix distribution
//Assume that A is an nrow x ncol matrix
//The local submatrix is an lnrow x lncol matrix
IT nrows = A.getnrow();
IT ncols = A.getncol();
IT nnz = A.getnnz();
IT m_perproc = nrows / pr;
IT n_perproc = ncols / pc;
DER* spSeq = A.seqptr(); // local submatrix
Dcsc<IT, NT>* dcsc = spSeq->GetDCSC();
IT lnrow = spSeq->getnrow();
IT lncol = spSeq->getncol();
IT localRowStart = colrank * m_perproc; // first row in this process
IT localColStart = rowrank * n_perproc; // first col in this process
AWPM_param<IT> param;
param.nprocs = nprocs;
param.pr = pr;
param.pc = pc;
param.lncol = lncol;
param.lnrow = lnrow;
param.m_perproc = m_perproc;
param.n_perproc = n_perproc;
param.localRowStart = localRowStart;
param.localColStart = localColStart;
param.myrank = myrank;
param.commGrid = commGrid;
//double t1CompAll = 0, t1CommAll = 0, t2CompAll = 0, t2CommAll = 0, t3CompAll = 0, t3CommAll = 0, t4CompAll = 0, t4CommAll = 0, t5CompAll = 0, t5CommAll = 0, tUpdateMateCompAll = 0, tUpdateWeightAll = 0;
double tPhase1 = 0, tPhase2 = 0, tPhase3 = 0, tPhase4 = 0, tPhase5 = 0, tUpdate = 0;
// -----------------------------------------------------------
// replicate mate vectors for mateCol2Row
// Communication cost: same as the first communication of SpMV
// -----------------------------------------------------------
int xsize = (int) mateCol2Row.LocArrSize();
int trxsize = 0;
MPI_Status status;
MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status);
std::vector<IT> trxnums(trxsize);
MPI_Sendrecv(mateCol2Row.GetLocArr(), xsize, MPIType<IT>(), diagneigh, TRX, trxnums.data(), trxsize, MPIType<IT>(), diagneigh, TRX, World, &status);
std::vector<int> colsize(pc);
colsize[colrank] = trxsize;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize.data(), 1, MPI_INT, ColWorld);
std::vector<int> dpls(pc,0); // displacements (zero initialized pid)
std::partial_sum(colsize.data(), colsize.data()+pc-1, dpls.data()+1);
int accsize = std::accumulate(colsize.data(), colsize.data()+pc, 0);
std::vector<IT> RepMateC2R(accsize);
MPI_Allgatherv(trxnums.data(), trxsize, MPIType<IT>(), RepMateC2R.data(), colsize.data(), dpls.data(), MPIType<IT>(), ColWorld);
// -----------------------------------------------------------
// -----------------------------------------------------------
// replicate mate vectors for mateRow2Col
// Communication cost: same as the first communication of SpMV
// (minus the cost of tranposing vector)
// -----------------------------------------------------------
xsize = (int) mateRow2Col.LocArrSize();
std::vector<int> rowsize(pr);
rowsize[rowrank] = xsize;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, rowsize.data(), 1, MPI_INT, RowWorld);
std::vector<int> rdpls(pr,0); // displacements (zero initialized pid)
std::partial_sum(rowsize.data(), rowsize.data()+pr-1, rdpls.data()+1);
accsize = std::accumulate(rowsize.data(), rowsize.data()+pr, 0);
std::vector<IT> RepMateR2C(accsize);
MPI_Allgatherv(mateRow2Col.GetLocArr(), xsize, MPIType<IT>(), RepMateR2C.data(), rowsize.data(), rdpls.data(), MPIType<IT>(), RowWorld);
// -----------------------------------------------------------
// Getting column pointers for all columns (for CSC-style access)
std::vector<IT> colptr (lncol+1,-1);
for(auto colit = spSeq->begcol(); colit != spSeq->endcol(); ++colit) // iterate over all columns
{
IT lj = colit.colid(); // local numbering
colptr[lj] = colit.colptr();
}
colptr[lncol] = spSeq->getnnz();
for(IT k=lncol-1; k>=0; k--)
{
if(colptr[k] == -1)
{
colptr[k] = colptr[k+1];
}
}
// TODO: will this fail empty local matrix where every entry of colptr will be zero
// -----------------------------------------------------------
// replicate weights of mates
// -----------------------------------------------------------
std::vector<NT> RepMateWR2C(lnrow);
std::vector<NT> RepMateWC2R(lncol);
ReplicateMateWeights(param, dcsc, colptr, RepMateC2R, RepMateWR2C, RepMateWC2R);
int iterations = 0;
NT minw;
NT weightCur = MatchingWeight(RepMateWC2R, RowWorld, minw);
NT weightPrev = weightCur - 999999999999;
while(weightCur > weightPrev && iterations++ < 10)
{
#ifdef DETAIL_STATS
if(myrank==0) std::cout << "Iteration " << iterations << ". matching weight: sum = "<< weightCur << " min = " << minw << std::endl;
#endif
// C requests
// each row is for a processor where C requests will be sent to
double tstart = MPI_Wtime();
std::vector<std::tuple<IT,IT,NT>> recvTuples = Phase1(param, dcsc, colptr, RepMateR2C, RepMateC2R, RepMateWR2C, RepMateWC2R );
tPhase1 += (MPI_Wtime() - tstart);
tstart = MPI_Wtime();
std::vector<std::tuple<IT,IT,IT,NT>> recvTuples1 = Phase2(param, recvTuples, dcsc, colptr, RepMateR2C, RepMateC2R, RepMateWR2C, RepMateWC2R );
std::vector< std::tuple<IT,IT,NT> >().swap(recvTuples);
tPhase2 += (MPI_Wtime() - tstart);
tstart = MPI_Wtime();
std::vector<std::tuple<IT,IT,IT,NT>> bestTuplesPhase3 (lncol);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int k=0; k<lncol; ++k)
{
bestTuplesPhase3[k] = std::make_tuple(-1,-1,-1,0); // fix this
}
for(int k=0; k<recvTuples1.size(); ++k)
{
IT mj = std::get<0>(recvTuples1[k]) ;
IT mi = std::get<1>(recvTuples1[k]) ;
IT i = std::get<2>(recvTuples1[k]) ;
NT weight = std::get<3>(recvTuples1[k]);
IT j = RepMateR2C[mj - localRowStart];
IT lj = j - localColStart;
// we can get rid of the first check if edge weights are non negative
if( (std::get<0>(bestTuplesPhase3[lj]) == -1) || (weight > std::get<3>(bestTuplesPhase3[lj])) )
{
bestTuplesPhase3[lj] = std::make_tuple(i,mi,mj,weight);
}
}
std::vector<std::vector<std::tuple<IT,IT, IT, NT>>> tempTuples1 (nprocs);
for(int k=0; k<lncol; ++k)
{
if( std::get<0>(bestTuplesPhase3[k]) != -1)
{
//IT j = RepMateR2C[mj - localRowStart]; /// fix me
IT i = std::get<0>(bestTuplesPhase3[k]) ;
IT mi = std::get<1>(bestTuplesPhase3[k]) ;
IT mj = std::get<2>(bestTuplesPhase3[k]) ;
IT j = RepMateR2C[mj - localRowStart];
NT weight = std::get<3>(bestTuplesPhase3[k]);
int owner = OwnerProcs(A, i, mi, nrows, ncols);
tempTuples1[owner].push_back(std::make_tuple(i, j, mj, weight));
}
}
//vector< tuple<IT,IT,IT, NT> >().swap(recvTuples1);
recvTuples1 = ExchangeData1(tempTuples1, World);
tPhase3 += (MPI_Wtime() - tstart);
tstart = MPI_Wtime();
std::vector<std::tuple<IT,IT,IT,IT, NT>> bestTuplesPhase4 (lncol);
// we could have used lnrow in both bestTuplesPhase3 and bestTuplesPhase4
// Phase 4
// at the owner of (i,mi)
#ifdef THREADED
#pragma omp parallel for
#endif
for(int k=0; k<lncol; ++k)
{
bestTuplesPhase4[k] = std::make_tuple(-1,-1,-1,-1,0);
}
for(int k=0; k<recvTuples1.size(); ++k)
{
IT i = std::get<0>(recvTuples1[k]) ;
IT j = std::get<1>(recvTuples1[k]) ;
IT mj = std::get<2>(recvTuples1[k]) ;
IT mi = RepMateR2C[i-localRowStart];
NT weight = std::get<3>(recvTuples1[k]);
IT lmi = mi - localColStart;
//IT lj = j - localColStart;
// cout <<"****" << i << " " << mi << " "<< j << " " << mj << " " << get<0>(bestTuplesPhase4[lj]) << endl;
// we can get rid of the first check if edge weights are non negative
if( ((std::get<0>(bestTuplesPhase4[lmi]) == -1) || (weight > std::get<4>(bestTuplesPhase4[lmi]))) && std::get<0>(bestTuplesPhase3[lmi])==-1 )
{
bestTuplesPhase4[lmi] = std::make_tuple(i,j,mi,mj,weight);
//cout << "(("<< i << " " << mi << " "<< j << " " << mj << "))"<< endl;
}
}
std::vector<std::vector<std::tuple<IT,IT,IT, IT>>> winnerTuples (nprocs);
for(int k=0; k<lncol; ++k)
{
if( std::get<0>(bestTuplesPhase4[k]) != -1)
{
//int owner = OwnerProcs(A, get<0>(bestTuples[k]), get<1>(bestTuples[k]), nrows, ncols); // (i,mi)
//tempTuples[owner].push_back(bestTuples[k]);
IT i = std::get<0>(bestTuplesPhase4[k]) ;
IT j = std::get<1>(bestTuplesPhase4[k]) ;
IT mi = std::get<2>(bestTuplesPhase4[k]) ;
IT mj = std::get<3>(bestTuplesPhase4[k]) ;
int owner = OwnerProcs(A, mj, j, nrows, ncols);
winnerTuples[owner].push_back(std::make_tuple(i, j, mi, mj));
/// be very careful here
// passing the opposite of the matching to the owner of (i,mi)
owner = OwnerProcs(A, i, mi, nrows, ncols);
winnerTuples[owner].push_back(std::make_tuple(mj, mi, j, i));
}
}
std::vector<std::tuple<IT,IT,IT,IT>> recvWinnerTuples = ExchangeData1(winnerTuples, World);
tPhase4 += (MPI_Wtime() - tstart);
tstart = MPI_Wtime();
// at the owner of (mj,j)
std::vector<std::tuple<IT,IT>> rowBcastTuples(recvWinnerTuples.size()); //(mi,mj)
std::vector<std::tuple<IT,IT>> colBcastTuples(recvWinnerTuples.size()); //(j,i)
#ifdef THREADED
#pragma omp parallel for
#endif
for(int k=0; k<recvWinnerTuples.size(); ++k)
{
IT i = std::get<0>(recvWinnerTuples[k]) ;
IT j = std::get<1>(recvWinnerTuples[k]) ;
IT mi = std::get<2>(recvWinnerTuples[k]) ;
IT mj = std::get<3>(recvWinnerTuples[k]);
colBcastTuples[k] = std::make_tuple(j,i);
rowBcastTuples[k] = std::make_tuple(mj,mi);
}
std::vector<std::tuple<IT,IT>> updatedR2C = MateBcast(rowBcastTuples, RowWorld);
std::vector<std::tuple<IT,IT>> updatedC2R = MateBcast(colBcastTuples, ColWorld);
tPhase5 += (MPI_Wtime() - tstart);
tstart = MPI_Wtime();
#ifdef THREADED
#pragma omp parallel for
#endif
for(int k=0; k<updatedR2C.size(); k++)
{
IT row = std::get<0>(updatedR2C[k]);
IT mate = std::get<1>(updatedR2C[k]);
RepMateR2C[row-localRowStart] = mate;
}
#ifdef THREADED
#pragma omp parallel for
#endif
for(int k=0; k<updatedC2R.size(); k++)
{
IT col = std::get<0>(updatedC2R[k]);
IT mate = std::get<1>(updatedC2R[k]);
RepMateC2R[col-localColStart] = mate;
}
// update weights of matched edges
// we can do better than this since we are doing sparse updates
ReplicateMateWeights(param, dcsc, colptr, RepMateC2R, RepMateWR2C, RepMateWC2R);
weightPrev = weightCur;
weightCur = MatchingWeight(RepMateWC2R, RowWorld, minw);
tUpdate += (MPI_Wtime() - tstart);
//UpdateMatching(mateRow2Col, mateCol2Row, RepMateR2C, RepMateC2R);
//CheckMatching(mateRow2Col,mateCol2Row);
}
#ifdef TIMING
if(myrank==0)
{
std::cout << "------------- overal timing (HWPM) -------------" << std::endl;
//std::cout << t1CompAll << " " << t1CommAll << " " << t2CompAll << " " << t2CommAll << " " << t3CompAll << " " << t3CommAll << " " << t4CompAll << " " << t4CommAll << " " << t5CompAll << " " << t5CommAll << " " << tUpdateMateCompAll << " " << tUpdateWeightAll << std::endl;
std::cout <<"Phase1: "<< tPhase1 << "\nPhase2: " << tPhase2 << "\nPhase3: " << tPhase3 << "\nPhase4: " << tPhase4 << "\nPhase5: " << tPhase5 << "\nUpdate: " << tUpdate << std::endl;
std::cout << "-------------------------------------------------" << std::endl;
}
#endif
// update the distributed mate vectors from replicated mate vectors
UpdateMatching(mateRow2Col, mateCol2Row, RepMateR2C, RepMateC2R);
//weightCur = MatchingWeight(RepMateWC2R, RowWorld);
}
// Rescale the weights of A in place to prepare it for weighted matching:
// take absolute values, normalize every row and then every column by its
// maximum (entries end up in [0,1]), and optionally take logs afterwards.
template <class IT, class NT, class DER>
void TransformWeight(SpParMat < IT, NT, DER > & A, bool applylog)
{
//A.Apply([](NT val){return log(1+abs(val));});
// if the matrix has explicit zero entries, we can still have problem.
// One solution is to remove explicit zero entries before cardinality matching (to be tested)
//A.Apply([](NT val){if(val==0) return log(numeric_limits<NT>::min()); else return log(fabs(val));});
A.Apply([](NT val){return (fabs(val));}); // work on |a_ij|
// row-wise normalization: divide each entry by its row maximum
FullyDistVec<IT, NT> maxvRow(A.getcommgrid());
A.Reduce(maxvRow, Row, maximum<NT>(), static_cast<NT>(numeric_limits<NT>::lowest()));
A.DimApply(Row, maxvRow, [](NT val, NT maxval){return val/maxval;});
// column-wise normalization applied on top of the row-normalized values
FullyDistVec<IT, NT> maxvCol(A.getcommgrid());
A.Reduce(maxvCol, Column, maximum<NT>(), static_cast<NT>(numeric_limits<NT>::lowest()));
A.DimApply(Column, maxvCol, [](NT val, NT maxval){return val/maxval;});
// NOTE(review): entries that are exactly 0 would give log(0) here -- the
// commented-out variants above suggest this is a known open issue.
if(applylog)
A.Apply([](NT val){return log(val);});
}
// Approximate-Weight Perfect Matching driver.  Pipeline:
//   1) copy A and normalize its weights (TransformWeight),
//   2) greedy maximal matching; if the original diagonal is itself a
//      perfect matching with better weight, keep the natural ordering,
//   3) maximum-cardinality matching if the greedy result is not perfect,
//   4) TwoThirdApprox to increase the weight of the perfect matching.
// optimizeProd==true applies log weights (product objective);
// weightedCard==true runs the cardinality phases on the weighted CSC
// matrix instead of the boolean one.  Results go to mateRow2Col/mateCol2Row.
template <class IT, class NT>
void AWPM(SpParMat < IT, NT, SpDCCols<IT, NT> > & A1, FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row, bool optimizeProd=true, bool weightedCard=true)
{
SpParMat < IT, NT, SpDCCols<IT, NT> > A(A1); // creating a copy because it is being transformed
if(optimizeProd)
TransformWeight(A, true);
else
TransformWeight(A, false);
// matrix in the formats the different phases expect
SpParMat < IT, NT, SpCCols<IT, NT> > Acsc(A);
SpParMat < IT, NT, SpDCCols<IT, bool> > Abool(A);
SpParMat < IT, NT, SpCCols<IT, bool> > ABoolCSC(MPI_COMM_WORLD);
if(weightedCard)
ABoolCSC = A;
// column degrees, used to steer the greedy matching
FullyDistVec<IT, IT> degCol(A.getcommgrid());
Abool.Reduce(degCol, Column, plus<IT>(), static_cast<IT>(0));
double ts;
// Compute the initial trace
IT diagnnz;
double origWeight = Trace(A, diagnnz);
bool isOriginalPerfect = diagnnz==A.getnrow(); // diagonal fully populated?
//--------------------------------------------------------
// Compute the maximal cardinality matching
//--------------------------------------------------------
if(weightedCard)
WeightedGreedy(Acsc, mateRow2Col, mateCol2Row, degCol);
else
WeightedGreedy(ABoolCSC, mateRow2Col, mateCol2Row, degCol);
double mclWeight = MatchingWeight( A, mateRow2Col, mateCol2Row);
bool isPerfectMCL = CheckMatching(mateRow2Col,mateCol2Row);
// if the original matrix has a perfect matching and better weight
if(isOriginalPerfect && mclWeight<=origWeight)
{
#ifdef DETAIL_STATS
SpParHelper::Print("Maximal matching is not better that the natural ordering. Hence, keeping the natural ordering.\n");
#endif
mateRow2Col.iota(A.getnrow(), 0);
mateCol2Row.iota(A.getncol(), 0);
mclWeight = origWeight;
isPerfectMCL = true;
}
//--------------------------------------------------------
// Compute the maximum cardinality matching
//--------------------------------------------------------
double tmcm = 0;
double mcmWeight = mclWeight;
bool isPerfectMCM = isPerfectMCL;
if(!isPerfectMCL) // run MCM only if we don't have a perfect matching
{
ts = MPI_Wtime();
if(weightedCard)
maximumMatching(Acsc, mateRow2Col, mateCol2Row, true, false, true);
else
maximumMatching(ABoolCSC, mateRow2Col, mateCol2Row, true, false, false);
tmcm = MPI_Wtime() - ts;
mcmWeight = MatchingWeight( A, mateRow2Col, mateCol2Row) ;
isPerfectMCM = CheckMatching(mateRow2Col,mateCol2Row);
}
if(!isPerfectMCM)
SpParHelper::Print("Warning: The Maximum Cardinality Matching did not return a perfect matching! Need to check the input matrix.\n");
//--------------------------------------------------------
// Increase the weight of the perfect matching
//--------------------------------------------------------
ts = MPI_Wtime();
TwoThirdApprox(A, mateRow2Col, mateCol2Row);
double tawpm = MPI_Wtime() - ts;
double awpmWeight = MatchingWeight( A, mateRow2Col, mateCol2Row) ;
bool isPerfectAWPM = CheckMatching(mateRow2Col,mateCol2Row);
if(!isPerfectAWPM)
SpParHelper::Print("Warning: The HWPM code did not return a perfect matching! Need to check the input matrix.\n");
if(isOriginalPerfect && awpmWeight<origWeight) // keep original
{
#ifdef DETAIL_STATS
SpParHelper::Print("AWPM is not better that the natural ordering. Hence, keeping the natural ordering.\n");
#endif
mateRow2Col.iota(A.getnrow(), 0);
mateCol2Row.iota(A.getncol(), 0);
awpmWeight = origWeight;
}
}
}
#endif /* ApproxWeightPerfectMatching_h */
|
GB_unaryop__ainv_uint8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_bool
// op(A') function: GB_tran__ainv_uint8_bool
// C type: uint8_t
// A type: bool
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = -(uint8_t) aij to each of the anz entries of Ax, writing
// into Cx.  For bool input the cast yields 0/1, so the unsigned negation
// stores 0 or 255.  Cx and Ax may alias because the op is elementwise.
GrB_Info GB_unop__ainv_uint8_bool
(
uint8_t *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// static schedule: every entry costs the same, so equal chunks balance
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast bool -> uint8_t, negate, and
// assemble into C.  The actual work is the shared template
// GB_unaryop_transpose.c, specialized through the GB_* macros above.
GrB_Info GB_tran__ainv_uint8_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, // per-slice counts (consumed by the template)
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice, // partition of A across naslice tasks
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DOMINOSEC_fmt_plug.c | /*
* DOMINOSEC_fmt.c (version 3)
*
* Notes/Domino More Secure Internet Password module for Solar Designer's JtR
* by regenrecht at o2.pl, Dec 2005.
* Algorithm discovery by regenrecht at o2.pl, bartavelle at bandecon.com.
*
* Short description.
* 1. Make 128bit digest of key. (128/8=16 bytes)
* 2. Do bin2hex() of key digest and put braces around it. (16*2+2=34 bytes)
* 3. Concat output of previous step to 5 bytes of salt. (5+34=39 bytes)
* 4. Make 128bit digest of first 34 bytes (out of 39 bytes). (128/8=16 bytes)
* 5. Compare first 10 bytes (out of 16) to check if the key was correct.
*
* Password file should have form of:
* TomaszJegerman:(GKjXibCW2Ml6juyQHUoP)
* RubasznyJan:(GrixoFHOckC/2CnHrHtM)
*
* Further optimizations (including some code rewrites) by Solar Designer
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_DOMINOSEC;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DOMINOSEC);
#else
#include <ctype.h>
#include <string.h>
//#define DOMINOSEC_32BIT
#ifdef DOMINOSEC_32BIT
#include <stdint.h>
#endif
#include "misc.h"
#include "formats.h"
#include "common.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 128
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "dominosec"
#define FORMAT_NAME "Lotus Notes/Domino 6 More Secure Internet Password"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH 22
#define BINARY_SIZE 9 /* oh, well :P */
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE 5
#define SALT_ALIGN sizeof(uint32_t)
#define DIGEST_SIZE 16
#define BINARY_BUFFER_SIZE (DIGEST_SIZE-SALT_SIZE)
#define ASCII_DIGEST_LENGTH (DIGEST_SIZE*2)
#define MIN_KEYS_PER_CRYPT 3
#define MAX_KEYS_PER_CRYPT 6
static unsigned char (*digest34)[34];
static char (*saved_key)[PLAINTEXT_LENGTH+1];
static uint32_t (*crypt_out)[(DIGEST_SIZE + 3) / sizeof(uint32_t)];
static unsigned char saved_salt[SALT_SIZE];
static int keys_changed, salt_changed;
/* Byte value -> two uppercase hex digits, used by crypt_all() to expand
 * binary digests into their ASCII form.  Note each entry is exactly two
 * chars with no terminating NUL -- these are not C strings. */
static const char hex_table[][2] = {
"00", "01", "02", "03", "04", "05", "06", "07",
"08", "09", "0A", "0B", "0C", "0D", "0E", "0F",
"10", "11", "12", "13", "14", "15", "16", "17",
"18", "19", "1A", "1B", "1C", "1D", "1E", "1F",
"20", "21", "22", "23", "24", "25", "26", "27",
"28", "29", "2A", "2B", "2C", "2D", "2E", "2F",
"30", "31", "32", "33", "34", "35", "36", "37",
"38", "39", "3A", "3B", "3C", "3D", "3E", "3F",
"40", "41", "42", "43", "44", "45", "46", "47",
"48", "49", "4A", "4B", "4C", "4D", "4E", "4F",
"50", "51", "52", "53", "54", "55", "56", "57",
"58", "59", "5A", "5B", "5C", "5D", "5E", "5F",
"60", "61", "62", "63", "64", "65", "66", "67",
"68", "69", "6A", "6B", "6C", "6D", "6E", "6F",
"70", "71", "72", "73", "74", "75", "76", "77",
"78", "79", "7A", "7B", "7C", "7D", "7E", "7F",
"80", "81", "82", "83", "84", "85", "86", "87",
"88", "89", "8A", "8B", "8C", "8D", "8E", "8F",
"90", "91", "92", "93", "94", "95", "96", "97",
"98", "99", "9A", "9B", "9C", "9D", "9E", "9F",
"A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7",
"A8", "A9", "AA", "AB", "AC", "AD", "AE", "AF",
"B0", "B1", "B2", "B3", "B4", "B5", "B6", "B7",
"B8", "B9", "BA", "BB", "BC", "BD", "BE", "BF",
"C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7",
"C8", "C9", "CA", "CB", "CC", "CD", "CE", "CF",
"D0", "D1", "D2", "D3", "D4", "D5", "D6", "D7",
"D8", "D9", "DA", "DB", "DC", "DD", "DE", "DF",
"E0", "E1", "E2", "E3", "E4", "E5", "E6", "E7",
"E8", "E9", "EA", "EB", "EC", "ED", "EE", "EF",
"F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
"F8", "F9", "FA", "FB", "FC", "FD", "FE", "FF"
};
/* 256-byte permutation table of the Lotus/Domino digest.  The first 48
 * bytes are repeated at the end so the transforms can index it with
 * j + t (j <= 48, t <= 255, max index 303) without a modulo. */
static const unsigned char lotus_magic_table[] = {
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
/* double power! (first 48 bytes repeated -- see comment above) */
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36
};
/* Self-test vectors: encoded salted hashes with their known plaintexts
 * (lengths from 0 up to 40 chars), terminated by a NULL entry. */
static struct fmt_tests tests[] = {
{"(GVMroLzc50YK/Yd+L8KH)", ""},
{"(GqnUDNNGNUz5HRoelmLU)", "x"},
{"(GNBpcGJRYpBe9orUOpmZ)", "dupaaa123"},
{"(G0xjUQzdKxvHpUYqo5hU)", "koziolekmatolek"},
{"(G+dfECo845XxUw+nFVYD)", "szesnascieznakow"},
{"(GowT5I2hVHZpRWpvGmux)", "terazjakiesdwadziesciacos"},
{"(Gq2bAtpguiTSSycy6dhu)", "trzydziescidwamozesieudaojnieuda"},
{"(G82TtgNcqcHGkpEo7wQp)", "looongrandominputdataforfunbutnotonlyoi!"},
{NULL}
};
/* Allocate the per-candidate key/digest buffers for this run and clear
 * the dirty flags.  Under OpenMP the key counts are scaled so every
 * thread gets OMP_SCALE groups of keys per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
	digest34 = mem_calloc(self->params.max_keys_per_crypt, sizeof(*digest34));

	salt_changed = 0;
	keys_changed = 0;
}
/* Release everything allocated by init(); the order is irrelevant since
 * the buffers are independent. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
	MEM_FREE(digest34);
}
/* Scratch layout for a decoded ciphertext: 5 salt bytes followed by the
 * truncated binary digest.  decode() writes into it; get_salt() and
 * get_binary() each copy out their half. */
static struct {
unsigned char salt[SALT_SIZE];
unsigned char hash[BINARY_BUFFER_SIZE];
} cipher_binary_struct;
/*
 * One compression step of the Lotus/Domino digest, without the checksum
 * update: mixes a 16-byte block into the 16-byte state.  A 48-byte work
 * buffer x is laid out as  state | block | state^block , then mixed for
 * 16 rounds through lotus_magic_table with a running feedback byte t,
 * and the first 16 mixed bytes are folded back into state.
 */
static void mdtransform_norecalc_1(unsigned char state[16], unsigned char block[16])
{
union {
unsigned char c[48];
#ifdef DOMINOSEC_32BIT
uint32_t u32[12];
#endif
} x;
unsigned char *p;
unsigned int i, j, t;
t = 0; p = x.c;
/* x[0..15] = state */
for (j = 48; j > 32; j--) {
t = state[p - x.c] ^ lotus_magic_table[j + t];
*p++ = t;
}
/* x[16..31] = block */
for (; j > 16; j--) {
t = block[p - x.c - 16] ^ lotus_magic_table[j + t];
*p++ = t;
}
/* x[32..47] = state ^ block */
for (; j > 0; j--) {
t = state[p - x.c - 32] ^ block[p - x.c - 32] ^ lotus_magic_table[j + t];
*p++ = t;
}
/* 16 rounds of in-place mixing over all 48 bytes (inner loop is
 * unrolled 12x; j decrements inside each statement) */
#ifndef DOMINOSEC_32BIT
for (i = 0; i < 16; i++) {
p = x.c;
for (j = 48; j > 0; j--) {
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j-- + t];
t = *p++ ^= lotus_magic_table[j + t];
}
}
#else
/* 32-bit variant: load four bytes at a time from the union's u32 view
 * and write the mixed bytes back through the byte view */
for (i = 0; i < 16; i++) {
uint32_t *q = x.u32;
p = x.c;
for (j = 48; j > 0; j--) {
uint32_t u = *q++;
t = *p++ = u ^ lotus_magic_table[j-- + t];
t = *p++ = (u >> 8) ^ lotus_magic_table[j-- + t];
u >>= 16;
t = *p++ = u ^ lotus_magic_table[j-- + t];
t = *p++ = (u >> 8) ^ lotus_magic_table[j + t];
}
}
#endif
/* fold the first 16 mixed bytes back into state */
p = x.c;
for (j = 48; j > 32; j--) {
state[p - x.c] = t = *p ^ lotus_magic_table[j + t];
p++;
}
}
/*
 * Full compression step: mix the block into state, then fold the block
 * into the running 16-byte checksum via a table lookup chained through
 * the feedback byte t (seeded from the previous checksum's last byte).
 */
static void mdtransform_1(unsigned char state[16],
unsigned char checksum[16], unsigned char block[16])
{
unsigned char c;
unsigned int i, t;
mdtransform_norecalc_1(state, block);
t = checksum[15];
for (i = 0; i < 16; i++) {
c = lotus_magic_table[block[i] ^ t];
t = checksum[i] ^= c;
}
}
/*
 * Three-way interleaved version of mdtransform_norecalc_1: runs the
 * same compression step on three independent (state, block) pairs at
 * once, so the serial table-lookup dependency chains of the three
 * streams can overlap.  Structure mirrors the single-stream version:
 * fill the three 48-byte buffers (state | block | state^block), mix for
 * 16 rounds, fold the first 16 bytes of each buffer back into state.
 */
static void mdtransform_norecalc_3(unsigned char state[3][16],
unsigned char block0[16],
unsigned char block1[16],
unsigned char block2[16])
{
union {
unsigned char c[48];
#ifdef DOMINOSEC_32BIT
uint32_t u32[12];
#endif
} x[3];
unsigned char *p0, *p1, *p2;
unsigned int i, j, t0, t1, t2;
t0 = t1 = t2 = 0;
p0 = x[0].c;
p1 = x[1].c;
p2 = x[2].c;
/* x[s][0..15] = state[s] */
for (j = 48; j > 32; j--) {
t0 = state[0][p0 - x[0].c] ^ lotus_magic_table[j + t0];
t1 = state[1][p1 - x[1].c] ^ lotus_magic_table[j + t1];
t2 = state[2][p2 - x[2].c] ^ lotus_magic_table[j + t2];
*p0++ = t0;
*p1++ = t1;
*p2++ = t2;
}
/* x[s][16..31] = block s */
for (; j > 16; j--) {
t0 = block0[p0 - x[0].c - 16] ^ lotus_magic_table[j + t0];
t1 = block1[p1 - x[1].c - 16] ^ lotus_magic_table[j + t1];
t2 = block2[p2 - x[2].c - 16] ^ lotus_magic_table[j + t2];
*p0++ = t0;
*p1++ = t1;
*p2++ = t2;
}
/* x[s][32..47] = state[s] ^ block s */
for (; j > 0; j--) {
t0 = state[0][p0 - x[0].c - 32] ^ block0[p0 - x[0].c - 32] ^ lotus_magic_table[j + t0];
t1 = state[1][p1 - x[1].c - 32] ^ block1[p1 - x[1].c - 32] ^ lotus_magic_table[j + t1];
t2 = state[2][p2 - x[2].c - 32] ^ block2[p2 - x[2].c - 32] ^ lotus_magic_table[j + t2];
*p0++ = t0;
*p1++ = t1;
*p2++ = t2;
}
/* 16 rounds of in-place mixing; the three streams are interleaved and
 * j decrements once per group of three statements */
#ifndef DOMINOSEC_32BIT
for (i = 0; i < 16; i++) {
p0 = x[0].c;
p1 = x[1].c;
p2 = x[2].c;
for (j = 48; j > 0; j--) {
t0 = *p0++ ^= lotus_magic_table[j + t0];
t1 = *p1++ ^= lotus_magic_table[j + t1];
t2 = *p2++ ^= lotus_magic_table[j-- + t2];
t0 = *p0++ ^= lotus_magic_table[j + t0];
t1 = *p1++ ^= lotus_magic_table[j + t1];
t2 = *p2++ ^= lotus_magic_table[j-- + t2];
t0 = *p0++ ^= lotus_magic_table[j + t0];
t1 = *p1++ ^= lotus_magic_table[j + t1];
t2 = *p2++ ^= lotus_magic_table[j-- + t2];
t0 = *p0++ ^= lotus_magic_table[j + t0];
t1 = *p1++ ^= lotus_magic_table[j + t1];
t2 = *p2++ ^= lotus_magic_table[j + t2];
}
}
#else
/* 32-bit variant: word loads, byte stores -- see mdtransform_norecalc_1 */
for (i = 0; i < 16; i++) {
uint32_t *q0 = x[0].u32;
uint32_t *q1 = x[1].u32;
uint32_t *q2 = x[2].u32;
p0 = x[0].c;
p1 = x[1].c;
p2 = x[2].c;
for (j = 48; j > 0; j--) {
uint32_t u0 = *q0++;
uint32_t u1 = *q1++;
uint32_t u2 = *q2++;
t0 = *p0++ = u0 ^ lotus_magic_table[j + t0];
t1 = *p1++ = u1 ^ lotus_magic_table[j + t1];
t2 = *p2++ = u2 ^ lotus_magic_table[j-- + t2];
t0 = *p0++ = (u0 >> 8) ^ lotus_magic_table[j + t0];
t1 = *p1++ = (u1 >> 8) ^ lotus_magic_table[j + t1];
t2 = *p2++ = (u2 >> 8) ^ lotus_magic_table[j-- + t2];
u0 >>= 16;
u1 >>= 16;
u2 >>= 16;
t0 = *p0++ = u0 ^ lotus_magic_table[j + t0];
t1 = *p1++ = u1 ^ lotus_magic_table[j + t1];
t2 = *p2++ = u2 ^ lotus_magic_table[j-- + t2];
t0 = *p0++ = (u0 >> 8) ^ lotus_magic_table[j + t0];
t1 = *p1++ = (u1 >> 8) ^ lotus_magic_table[j + t1];
t2 = *p2++ = (u2 >> 8) ^ lotus_magic_table[j + t2];
}
}
#endif
/* fold the first 16 mixed bytes of each stream back into its state */
p0 = x[0].c;
p1 = x[1].c;
p2 = x[2].c;
for (j = 48; j > 32; j--) {
state[0][p0 - x[0].c] = t0 = *p0 ^ lotus_magic_table[j + t0];
state[1][p1 - x[1].c] = t1 = *p1 ^ lotus_magic_table[j + t1];
state[2][p2 - x[2].c] = t2 = *p2 ^ lotus_magic_table[j + t2];
p0++;
p1++;
p2++;
}
}
/*
 * Three-way full compression step: mix each block into its state, then
 * fold each block into its running checksum (same per-stream logic as
 * mdtransform_1, interleaved for the three streams).
 */
static void mdtransform_3(unsigned char state[3][16],
unsigned char checksum[3][16],
unsigned char block0[16],
unsigned char block1[16],
unsigned char block2[16])
{
unsigned int i, t0, t1, t2;
mdtransform_norecalc_3(state, block0, block1, block2);
t0 = checksum[0][15];
t1 = checksum[1][15];
t2 = checksum[2][15];
for (i = 0; i < 16; i++) {
t0 = checksum[0][i] ^= lotus_magic_table[block0[i] ^ t0];
t1 = checksum[1][i] ^= lotus_magic_table[block1[i] ^ t1];
t2 = checksum[2][i] ^= lotus_magic_table[block2[i] ^ t2];
}
}
#if 0
/* Reference single-stream digest (compiled out via the surrounding
 * #if 0; kept as documentation of the algorithm): hash `size` bytes of
 * `in` into a 16-byte digest: full 16-byte blocks, a final block padded
 * with the pad-length byte, then one pass over the checksum. */
static void domino_big_md_1(unsigned char *in, unsigned int size, unsigned char *out)
{
unsigned char state[16] = {0};
unsigned char checksum[16] = {0};
unsigned char block[16];
unsigned int curpos = 0;
while (curpos + 15 < size) {
mdtransform_1(state, checksum, in + curpos);
curpos += 16;
}
{
unsigned int pad = size - curpos;
memcpy(block, in + curpos, pad);
memset(block + pad, 16 - pad, 16 - pad); /* pad byte = pad length */
mdtransform_1(state, checksum, block);
}
mdtransform_norecalc_1(state, checksum); /* final mix of checksum into state */
memcpy(out, state, 16);
}
#endif
/*
 * Digest three inputs of possibly different lengths in one pass: the
 * blocks all three inputs share are processed with the interleaved
 * 3-way transform; each input's remaining tail is processed with the
 * single-stream transform; then padding and the final checksum pass.
 * Writes a 16-byte digest per input to out0/out1/out2.
 */
static void domino_big_md_3(unsigned char *in0, unsigned int size0,
unsigned char *in1, unsigned int size1,
unsigned char *in2, unsigned int size2,
unsigned char *out0, unsigned char *out1, unsigned char *out2)
{
unsigned char state[3][16] = {{0}, {0}, {0}};
unsigned char checksum[3][16] = {{0}, {0}, {0}};
unsigned char block[3][16];
unsigned int min, curpos = 0, curpos0, curpos1, curpos2;
min = (size0 < size1) ? size0 : size1;
if (size2 < min)
min = size2;
/* shared full blocks: 3-way interleaved */
while (curpos + 15 < min) {
mdtransform_3(state, checksum,
in0 + curpos, in1 + curpos, in2 + curpos);
curpos += 16;
}
/* per-input leftover full blocks */
curpos0 = curpos;
while (curpos0 + 15 < size0) {
mdtransform_1(state[0], checksum[0], in0 + curpos0);
curpos0 += 16;
}
curpos1 = curpos;
while (curpos1 + 15 < size1) {
mdtransform_1(state[1], checksum[1], in1 + curpos1);
curpos1 += 16;
}
curpos2 = curpos;
while (curpos2 + 15 < size2) {
mdtransform_1(state[2], checksum[2], in2 + curpos2);
curpos2 += 16;
}
/* final partial blocks, padded with the pad-length byte */
{
unsigned int pad0 = size0 - curpos0;
unsigned int pad1 = size1 - curpos1;
unsigned int pad2 = size2 - curpos2;
memcpy(block[0], in0 + curpos0, pad0);
memcpy(block[1], in1 + curpos1, pad1);
memcpy(block[2], in2 + curpos2, pad2);
memset(block[0] + pad0, 16 - pad0, 16 - pad0);
memset(block[1] + pad1, 16 - pad1, 16 - pad1);
memset(block[2] + pad2, 16 - pad2, 16 - pad2);
mdtransform_3(state, checksum, block[0], block[1], block[2]);
}
/* final mix of each checksum into its state */
mdtransform_norecalc_3(state, checksum[0], checksum[1], checksum[2]);
memcpy(out0, state[0], 16);
memcpy(out1, state[1], 16);
memcpy(out2, state[2], 16);
}
/*
 * Specialized 3-way digest for inputs of exactly 34 bytes (the
 * salt + '(' + hex-digest buffers built in crypt_all): two full 16-byte
 * blocks, then a final block holding the last 2 bytes padded with the
 * pad-length byte 14, then the closing checksum pass.
 */
static void domino_big_md_3_34(unsigned char *in0,
unsigned char *in1,
unsigned char *in2,
unsigned char *out0,
unsigned char *out1,
unsigned char *out2)
{
unsigned char state[3][16] = {{0}, {0}, {0}};
unsigned char checksum[3][16] = {{0}, {0}, {0}};
unsigned char block[3][16];
mdtransform_3(state, checksum, in0, in1, in2);
mdtransform_3(state, checksum, in0 + 16, in1 + 16, in2 + 16);
memcpy(block[0], in0 + 32, 2);
memcpy(block[1], in1 + 32, 2);
memcpy(block[2], in2 + 32, 2);
memset(block[0] + 2, 14, 14);
memset(block[1] + 2, 14, 14);
memset(block[2] + 2, 14, 14);
mdtransform_3(state, checksum, block[0], block[1], block[2]);
mdtransform_norecalc_3(state, checksum[0], checksum[1], checksum[2]);
memcpy(out0, state[0], 16);
memcpy(out1, state[1], 16);
memcpy(out2, state[2], 16);
}
/* Accept only 22-character ciphertexts shaped "(G....................)"
 * whose 20 middle characters are from the base64-ish set [A-Za-z0-9+/].
 * Returns 1 for a well-formed ciphertext, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	unsigned int i;

	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;
	if (ciphertext[0] != '(' || ciphertext[1] != 'G' ||
	    ciphertext[CIPHERTEXT_LENGTH - 1] != ')')
		return 0;
	for (i = 1; i < CIPHERTEXT_LENGTH - 1; ++i) {
		unsigned char ch = ciphertext[i];

		if (!isalnum(ch) && ch != '+' && ch != '/')
			return 0;
	}
	return 1;
}
/*
static unsigned int proper_mul(int delta_apsik)
{
__asm__("movl $0xAAAAAAAB, %eax \n"
"movl 0x8(%ebp), %edx \n"
"mul %edx \n"
"shr $0x2,%edx \n"
"movl %edx, %eax \n");
}
*/
static void decode(unsigned char *ascii_cipher, unsigned char *binary)
{
unsigned int out = 0, apsik = 0, loop;
unsigned int i;
unsigned char ch;
ascii_cipher += 2;
i = 0;
do {
if (apsik < 8) {
/* should be using proper_mul, but what the heck...
it's nearly the same :] */
loop = 2; /* ~ loop = proper_mul(13 - apsik); */
apsik += loop*6;
do {
out <<= 6;
ch = *ascii_cipher;
if (ch < '0' || ch > '9')
if (ch < 'A' || ch > 'Z')
if (ch < 'a' || ch > 'z')
if (ch != '+')
if (ch == '/')
out += '?';
else
{ ; } /* shit happens */
else
out += '>';
else
out += ch-'=';
else
out += ch-'7';
else
out += ch-'0';
++ascii_cipher;
} while (--loop);
}
loop = apsik-8;
ch = out >> loop;
*(binary+i) = ch;
ch <<= loop;
apsik = loop;
out -= ch;
} while (++i < 15);
binary[3] += -4;
}
/* Decode the ciphertext and return (in a static buffer) the binary
 * digest part; only BINARY_SIZE bytes of it are meaningful. */
static void *get_binary(char *ciphertext)
{
static uint32_t out[BINARY_SIZE / sizeof(uint32_t) + 1];
decode((unsigned char*)ciphertext, (unsigned char*)&cipher_binary_struct);
memcpy(out, cipher_binary_struct.hash, BINARY_SIZE);
return (void*)out;
}
/* Decode the ciphertext and return (in a static buffer) its 5-byte
 * salt prefix. */
static void *get_salt(char *ciphertext)
{
static uint32_t out[SALT_SIZE / sizeof(uint32_t) + 1];
decode((unsigned char*)ciphertext, (unsigned char*)&cipher_binary_struct);
memcpy(out, cipher_binary_struct.salt, SALT_SIZE);
return (void*)out;
}
/* Install a new 5-byte salt and mark it dirty so the next crypt_all()
 * refreshes the salt prefix of every digest34 buffer. */
static void set_salt(void *salt)
{
	const unsigned char *src = salt;
	int i;

	for (i = 0; i < SALT_SIZE; i++)
		saved_salt[i] = src[i];
	salt_changed = 1;
}
/* Store a candidate password (bounded copy into saved_key[index]) and
 * flag that digests must be recomputed on the next crypt_all(). */
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
keys_changed = 1;
}
/* Return the stored plaintext candidate for this index. */
static char *get_key(int index)
{
return saved_key[index];
}
/*
 * Hash `count` candidates, three at a time (MIN_KEYS_PER_CRYPT == 3, so
 * buffers come in multiples of three).  Per group:
 *   1) only if keys changed: digest the three keys and expand digest
 *      bytes 0..13 as hex into digest34[..][6..33];
 *   2) only if the salt changed: write the 5 salt bytes plus '(' into
 *      digest34[..][0..5];
 *   3) digest the 34-byte buffers into crypt_out.
 * Caching on keys_changed/salt_changed avoids redoing step 1 when only
 * the salt varies.  NOTE(review): the flags are shared plain ints read
 * inside the OpenMP loop and cleared only after it -- appears intentional.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += 3) {
int i, j;
if (keys_changed) {
char *k0 = saved_key[index];
char *k1 = saved_key[index + 1];
char *k2 = saved_key[index + 2];
unsigned char digest16[3][16];
domino_big_md_3((unsigned char *)k0, strlen(k0),
(unsigned char *)k1, strlen(k1),
(unsigned char *)k2, strlen(k2),
digest16[0], digest16[1], digest16[2]);
/* Not (++i < 16) !
* Domino will do hash of first 34 bytes ignoring The Fact that now
* there is a salt at a beginning of buffer. This means that last 5
* bytes "EEFF)" of password digest are meaningless.
*/
for (i = 0, j = 6; i < 14; i++, j += 2) {
const char *hex2 = hex_table[ARCH_INDEX(digest16[0][i])];
digest34[index][j] = hex2[0];
digest34[index][j + 1] = hex2[1];
hex2 = hex_table[ARCH_INDEX(digest16[1][i])];
digest34[index + 1][j] = hex2[0];
digest34[index + 1][j + 1] = hex2[1];
hex2 = hex_table[ARCH_INDEX(digest16[2][i])];
digest34[index + 2][j] = hex2[0];
digest34[index + 2][j + 1] = hex2[1];
}
}
if (salt_changed) {
digest34[index + 2][0] = digest34[index + 1][0] =
digest34[index][0] = saved_salt[0];
digest34[index + 2][1] = digest34[index + 1][1] =
digest34[index][1] = saved_salt[1];
digest34[index + 2][2] = digest34[index + 1][2] =
digest34[index][2] = saved_salt[2];
digest34[index + 2][3] = digest34[index + 1][3] =
digest34[index][3] = saved_salt[3];
digest34[index + 2][4] = digest34[index + 1][4] =
digest34[index][4] = saved_salt[4];
digest34[index + 2][5] = digest34[index + 1][5] =
digest34[index][5] = '(';
}
domino_big_md_3_34(digest34[index], digest34[index + 1],
digest34[index + 2],
(unsigned char *)crypt_out[index],
(unsigned char *)crypt_out[index + 1],
(unsigned char *)crypt_out[index + 2]);
}
keys_changed = salt_changed = 0;
return count;
}
/*
 * Quick scan: does any computed digest share its first ARCH_SIZE bytes
 * with the target binary?  Only 10 bytes of the digest are meaningful
 * anyway, so a prefix comparison is sufficient for this fast path.
 */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (memcmp(binary, crypt_out[i], ARCH_SIZE) == 0)
			return 1;
	}
	return 0;
}
/* Full BINARY_SIZE comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: cmp_one's BINARY_SIZE check is treated as
 * conclusive for this format. */
static int cmp_exact(char *source, int index)
{
return 1;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Bucket a salt by the low bits of its first 32-bit word (SALT_ALIGN
 * guarantees the alignment of the read).  Dead commented-out debug
 * printf removed. */
static int salt_hash(void *salt)
{
	return *(uint32_t*)salt & (SALT_HASH_SIZE - 1);
}
/* Format descriptor wiring the functions above into John the Ripper's
 * plugin interface: parameter block first, then the method table. */
struct fmt_main fmt_DOMINOSEC = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ NULL },
tests
},
{
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "im2col.h"
#include "dark_cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdint.h>
#ifdef _WIN32
#include <intrin.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#define TILE_M 4 // 4 ops
#define TILE_N 16 // AVX2 = 2 ops * 8 floats
#define TILE_K 16 // loop
#ifdef __cplusplus
#define PUT_IN_REGISTER
#else
#define PUT_IN_REGISTER register
#endif
/*
 * Binary-weight GEMM for XNOR networks: for every element of the M x K
 * sign matrix A, the corresponding row of B is added into a row of C when
 * A[i][k] is non-zero and subtracted when it is zero (i.e. weights are
 * interpreted as +1 / -1).  ALPHA is accepted for signature compatibility
 * with gemm() but is not used by this kernel.
 */
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, inner;
    for (row = 0; row < M; ++row) {
        float *c_row = &C[row * ldc];
        for (inner = 0; inner < K; ++inner) {
            const char sign = A[row * lda + inner];
            const float *b_row = &B[inner * ldb];
            if (sign) {
                for (col = 0; col < N; ++col) c_row[col] += b_row[col];
            } else {
                for (col = 0; col < N; ++col) c_row[col] -= b_row[col];
            }
        }
    }
}
/*
 * Allocate a rows x cols float matrix (via the project's checked xcalloc)
 * and fill it with pseudo-random values in [0, 1] drawn from rand().
 * Caller owns the returned buffer and must free() it.
 */
float *random_matrix(int rows, int cols)
{
    const int total = rows * cols;
    float *m = (float *)xcalloc(total, sizeof(float));
    int idx;
    for (idx = 0; idx < total; ++idx) {
        m[idx] = (float)rand() / RAND_MAX;
    }
    return m;
}
/*
 * Benchmark helper: multiplies random (m x k) and (k x n) matrices ten
 * times through gemm_cpu() (honoring the TA/TB transposition flags) and
 * prints the elapsed wall time of the batch.
 *
 * Bug fix: the clock() delta was divided by CLOCKS_PER_SEC only, which
 * yields seconds, while the message is labeled "ms".  Scale by 1000 so
 * the printed number matches its unit.
 */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if (!TA) a = random_matrix(m, k);
    else a = random_matrix(k, m);
    int lda = (!TA) ? k : m;
    float *b;
    if (!TB) b = random_matrix(k, n);
    else b = random_matrix(n, k);
    int ldb = (!TB) ? n : k;
    float *c = random_matrix(m, n);
    int i;
    clock_t start = clock(), end;
    for (i = 0; i < 10; ++i) {
        gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n", m, k, k, n, TA, TB,
        (double)(end - start) * 1000.0 / CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/*
 * Public GEMM entry point: C = ALPHA * op(A) * op(B) + BETA * C, where
 * op() optionally transposes per TA/TB.  Delegates directly to the CPU
 * implementation gemm_cpu().
 */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}
//--------------------------------------------
// XNOR bitwise GEMM for binary neural network
//--------------------------------------------
/* Logical XNOR: 1 when both inputs are equal, 0 otherwise. */
static inline unsigned char xnor(unsigned char a, unsigned char b) {
    return (unsigned char)(a == b);
}
// INT-32
/*
 * Read bit `index` from a bit array packed into 32-bit words.
 * Returns 1 if the bit is set, 0 otherwise.
 *
 * Bug fix: the mask was built as `1 << src_shift` on a signed int, which
 * is undefined behavior when src_shift == 31 (shifting into the sign
 * bit); build the mask from an unsigned 32-bit constant instead.
 */
static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) {
    size_t src_i = index / 32;
    int src_shift = index % 32;
    unsigned char val = (src[src_i] & ((uint32_t)1 << src_shift)) > 0;
    return val;
}
/* Bitwise XNOR of two 32-bit words: each result bit is set where the inputs agree. */
static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
    return ~a ^ b;
}
/* Bitwise XNOR of two 64-bit words: each result bit is set where the inputs agree. */
static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
    return ~a ^ b;
}
/* Broadcast a flag to all 32 bits: 0 -> 0x00000000, non-zero -> 0xFFFFFFFF. */
static inline uint32_t fill_bit_int32(char src) {
    return src ? 0xFFFFFFFFu : 0u;
}
/* Broadcast a flag to all 64 bits: 0 -> 0, non-zero -> all ones. */
static inline uint64_t fill_bit_int64(char src) {
    return src ? 0xFFFFFFFFFFFFFFFFULL : 0ULL;
}
/* Debug helper: print the 32 bits of src, least-significant bit first, then a newline. */
void binary_int32_printf(uint32_t src) {
    int bit;
    for (bit = 0; bit < 32; ++bit) {
        putchar(((src >> bit) & 1u) ? '1' : '0');
    }
    putchar('\n');
}
/* Debug helper: print the 64 bits of src, least-significant bit first, then a newline. */
void binary_int64_printf(uint64_t src) {
    int bit;
    for (bit = 0; bit < 64; ++bit) {
        putchar(((src >> bit) & 1u) ? '1' : '0');
    }
    putchar('\n');
}
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k*ldb + j);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
char b_bit = get_bit(B, j*ldb + k);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
const char a_bit = get_bit(A, i*lda + k);
uint64_t a_bit64 = fill_bit_int64(a_bit);
int k_ldb = k*ldb;
for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056]
if ((N - j > 64) && (k_ldb % 8 == 0)) {
uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
//printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc
printf("\n %d \n", __popcnt64(c_bit64)); // msvs
int h;
for (h = 0; h < 64; ++h)
if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1;
//binary_int64_printf(a_bit64);
//binary_int64_printf(b_bit64);
//binary_int64_printf(c_bit64);
}
else {
for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k_ldb + j);
if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1;
}
}
}
}
}
if (mean_arr) {
//int K_2 = K / 2;
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
//float mean_val2 = 2 * mean_val;
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
//C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2;
}
}
}
else {
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j) {
C[i*ldc + j] = count_arr[i*ldc + j] - K / 2;
}
}
}
free(count_arr);
//getchar();
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
#ifdef WIN32
int tmp_count = __popcnt64(c_bit64);
#else
int tmp_count = __builtin_popcountll(c_bit64);
#endif
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
*/
//----------------------------
// is not used
/*
void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb)
{
unsigned int x, y;
for (y = 0; y < 32; ++y) {
for (x = 0; x < 32; ++x) {
if (A[y * lda] & ((uint32_t)1 << x)) B[x * ldb] |= (uint32_t)1 << y;
}
}
}
*/
#ifndef GPU
/* Reverse the bit order of a byte (bit 0 becomes bit 7, etc.). */
uint8_t reverse_8_bit(uint8_t a) {
    uint8_t reversed = 0;
    int bit;
    for (bit = 0; bit < 8; ++bit) {
        reversed = (uint8_t)((reversed << 1) | ((a >> bit) & 1u));
    }
    return reversed;
}
/*
 * Reverse the full bit order of a 32-bit word (bit 0 <-> bit 31).
 * Implemented as a log2(32)-step swap network instead of per-byte table
 * reversal; equivalent to reversing the byte order and then the bits
 * inside each byte.  (On ARM this could be a single RBIT instruction.)
 */
uint32_t reverse_32_bit(uint32_t a)
{
    a = ((a & 0x55555555u) << 1) | ((a >> 1) & 0x55555555u);  /* swap adjacent bits */
    a = ((a & 0x33333333u) << 2) | ((a >> 2) & 0x33333333u);  /* swap bit pairs */
    a = ((a & 0x0F0F0F0Fu) << 4) | ((a >> 4) & 0x0F0F0F0Fu);  /* swap nibbles */
    a = ((a & 0x00FF00FFu) << 8) | ((a >> 8) & 0x00FF00FFu);  /* swap bytes */
    return (a << 16) | (a >> 16);                             /* swap halfwords */
}
/*
 * One butterfly-exchange step of the 32x32 bit transpose: swaps the
 * j-shifted bit fields of a0 and a1 selected by mask m.  NOTE: relies on
 * a variable `t` being in scope at the expansion site, so it is only
 * safe inside transpose32_optimized() below.
 */
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
/*
 * In-place transpose of a 32x32 bit matrix stored as 32 uint32_t rows:
 * five unrolled butterfly passes (16-, 8-, 4-, 2-, 1-bit exchanges, the
 * Hacker's Delight "transpose32" scheme shown rolled-up in the comment
 * below), followed by a final pass that mirrors the row order while
 * bit-reversing each word.
 */
void transpose32_optimized(uint32_t A[32]) {
int j, k;
unsigned m, t;
//m = 0x0000FFFF;
//for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
// for (k = 0; k < 32; k = (k + j + 1) & ~j) {
// t = (A[k] ^ (A[k + j] >> j)) & m;
// A[k] = A[k] ^ t;
// A[k + j] = A[k + j] ^ (t << j);
// }
//}
/* unrolled butterfly passes: exchange widths 16, 8, 4, 2, 1 */
j = 16;
m = 0x0000FFFF;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 8;
m = 0x00ff00ff;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 4;
m = 0x0f0f0f0f;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 2;
m = 0x33333333;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 1;
m = 0x55555555;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
// reverse Y
for (j = 0; j < 16; ++j) {
uint32_t tmp = A[j];
A[j] = reverse_32_bit(A[31 - j]);
A[31 - j] = reverse_32_bit(tmp);
}
}
/*
 * Transpose one 32x32 bit tile: gather 32 column-strided words from A
 * (stride m words), transpose them in a local buffer, then scatter to B
 * (stride n words).  transpose32_optimized() also mirrors the row order
 * with per-word bit reversal, producing the "reversed diagonal" layout
 * expected by the XNOR GEMM kernels.
 */
void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
    uint32_t tile[32];
    int row;

    for (row = 0; row < 32; ++row) tile[row] = A[row * m];
    transpose32_optimized(tile);
    for (row = 0; row < 32; ++row) B[row * n] = tile[row];
}
/*
 * Bitwise 8x8 transpose: every set bit (x, y) of tile A is OR-ed into
 * position (y, x) of tile B.  Rows of A are lda bytes apart and rows of
 * B are ldb bytes apart.  B must be zero-initialized by the caller,
 * since bits are only ever set, never cleared.
 */
void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb)
{
    unsigned row, col;
    for (col = 0; col < 8; ++col) {
        for (row = 0; row < 8; ++row) {
            if (A[row * lda] & (1u << col))
                B[col * ldb] |= (unsigned char)(1u << row);
        }
    }
}
/* Reverse the bit order of a byte, bit by bit (slow reference variant). */
unsigned char reverse_byte_1(char a)
{
    unsigned char in = (unsigned char)a;
    unsigned char out = 0;
    int bit;
    for (bit = 0; bit < 8; ++bit) {
        if (in & (1u << bit)) out |= (unsigned char)(0x80u >> bit);
    }
    return out;
}
/* Reverse the bit order of a byte via a three-step swap network. */
unsigned char reverse_byte(unsigned char a)
{
    a = (unsigned char)(((a & 0x55u) << 1) | ((a >> 1) & 0x55u)); /* swap adjacent bits */
    a = (unsigned char)(((a & 0x33u) << 2) | ((a >> 2) & 0x33u)); /* swap bit pairs */
    return (unsigned char)((a << 4) | (a >> 4));                  /* swap nibbles */
}
/* Nibble-reversal table: lookup[v] is the 4-bit bit-reversal of v (0..15). */
static unsigned char lookup[16] = {
    0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
    0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, };
/* Reverse a byte: reverse each nibble through the table, then swap the nibbles. */
unsigned char reverse_byte_3(unsigned char n) {
    unsigned char lo_reversed = lookup[n & 0x0F];
    unsigned char hi_reversed = lookup[n >> 4];
    return (unsigned char)((lo_reversed << 4) | hi_reversed);
}
/*
 * Transpose one 8x8 bit tile read from A (row stride m bytes) into B
 * (row stride n bytes), emitting the transposed rows along the reversed
 * diagonal: row r of the transpose is stored bit-reversed at B[(7-r)*n].
 * Based on Hacker's Delight transpose8 (straight-line word packing).
 *
 * Bug fix: the packing loads of A had been commented out (with x = y = 0
 * left behind), so the routine ignored its input entirely and always
 * wrote reverse_byte(0) == 0 to every output row.  The loads are
 * restored; A is read-only.
 */
void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n)
{
    unsigned x, y, t;
    // Load the 8 source rows and pack them into two 32-bit words.
    x = ((unsigned)A[0] << 24) | ((unsigned)A[m] << 16) | ((unsigned)A[2 * m] << 8) | A[3 * m];
    y = ((unsigned)A[4 * m] << 24) | ((unsigned)A[5 * m] << 16) | ((unsigned)A[6 * m] << 8) | A[7 * m];
    // Butterfly exchanges within each packed word (7- then 14-bit swaps).
    t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
    t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
    t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
    t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
    // Exchange nibble planes between the two words.
    t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
    y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
    x = t;
    // Store rows in reversed order, bit-reversed within each byte.
    B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x);
    B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y);
}
/*
// transpose by 8-bit
void transpose_bin(char *A, char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 8) {
int j;
for (j = 0; j < m; j += 8) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
//transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8);
transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8);
}
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
}
}
}
*/
#endif
// transpose by 32-bit
/*
 * Transpose an n x m bit matrix (row strides lda/ldb given in bits,
 * storage packed 32 bits per uint32_t word) using 32x32 reversed-diagonal
 * tiles; rows of tiles are processed in parallel with OpenMP.
 * `block_size` is accepted but unused.
 * NOTE(review): the bit-granular tail loop below can never execute -- the
 * tiled loop only exits once j >= m -- so callers presumably pad m to a
 * multiple of 32; confirm against the call sites.
 */
void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32);
//printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 32) {
int j;
for (j = 0; j < m; j += 32) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
//transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32);
}
/* dead in practice: j is already >= m here (see NOTE above) */
for (; j < m; ++j) {
if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i);
}
}
}
/* Count the set bits of a 32-bit word, using the compiler's intrinsic. */
static inline int popcnt_32(uint32_t val32) {
#ifdef WIN32 // Windows MSVS
    return (int)__popcnt(val32);
#else // Linux GCC
    return __builtin_popcount(val32);
#endif
}
//----------------------------
#if (defined(__AVX__) && defined(__x86_64__)) || (defined(_WIN64) && !defined(__MINGW32__))
#ifdef _WIN64
#include <intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#if defined(_MSC_VER) && _MSC_VER <= 1900
static inline __int32 _mm256_extract_epi64(__m256i a, const int index) {
return a.m256i_i64[index];
}
static inline __int32 _mm256_extract_epi32(__m256i a, const int index) {
return a.m256i_i32[index];
}
#endif
// Reinterpret the bits of a 32-bit integer as an IEEE-754 float.
// Bug fix: the previous pointer cast (*(float *)&a) violated C strict
// aliasing rules (undefined behavior under optimization); memcpy is the
// well-defined way to type-pun and compiles to the same register move.
static inline float _castu32_f32(uint32_t a) {
    float f;
    memcpy(&f, &a, sizeof(f));
    return f;
}
// Extract one float lane of an AVX register (MSVC union member access).
static inline float _mm256_extract_float32(__m256 a, const int index) {
    return a.m256_f32[index];
}
#else // Linux GCC/Clang
#include <x86intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <cpuid.h>
// Reinterpret the bits of a 32-bit integer as an IEEE-754 float.
// Bug fix: the previous pointer cast (*(float *)&a) violated C strict
// aliasing rules (undefined behavior under optimization); memcpy is the
// well-defined way to type-pun and compiles to the same register move.
static inline float _castu32_f32(uint32_t a) {
    float f;
    memcpy(&f, &a, sizeof(f));
    return f;
}
/*
 * Extract float lane `index` (0..7) from an AVX register.  GCC/Clang
 * require the lane argument of _mm256_extract_epi32 to be a compile-time
 * constant, hence the explicit 8-way switch; out-of-range indices fall
 * back to lane 0.
 */
static inline float _mm256_extract_float32(__m256 a, const int index) {
switch(index) {
case 0:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
case 1:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 1));
case 2:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 2));
case 3:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 3));
case 4:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 4));
case 5:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 5));
case 6:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 6));
case 7:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 7));
default:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
}
}
/*
 * Raw CPUID query: executes CPUID with the given EAX leaf and stores the
 * resulting EAX/EBX/ECX/EDX into abcd[0..3].  EBX is shuffled through
 * EDI around the instruction (see inline comment) so the sequence stays
 * safe when EBX is reserved -- e.g. as the PIC/GOT register on 32-bit.
 */
void asm_cpuid(uint32_t* abcd, uint32_t eax)
{
uint32_t ebx = 0, edx = 0, ecx = 0;
// EBX is saved to EDI and later restored
__asm__("movl %%ebx, %%edi;"
"cpuid;"
"xchgl %%ebx, %%edi;"
: "=D"(ebx),
"+a"(eax), "+c"(ecx), "=d"(edx));
abcd[0] = eax;
abcd[1] = ebx;
abcd[2] = ecx;
abcd[3] = edx;
}
#endif
#ifdef _WIN32
// Windows
/* MSVC: map cpuid(info, leaf) onto the __cpuidex intrinsic (subleaf 0). */
#define cpuid(info, x) __cpuidex(info, x, 0)
#else
// GCC Intrinsics
/* GCC/Clang: query CPUID leaf InfoType (subleaf 0) into info[0..3] via <cpuid.h>. */
void cpuid(int info[4], int InfoType) {
__cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
}
#endif
// Misc.
/*
 * CPU capability flags: zero-initialized file statics, populated once by
 * check_cpu_features() below and read through is_avx()/is_fma_avx2().
 */
static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1;
static int HW_ABM; // Advanced Bit Manipulation
// SIMD: 128-bit
static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA;
// SIMD: 256-bit
static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2;
// SIMD: 512-bit
static int HW_AVX512F; // AVX512 Foundation
static int HW_AVX512CD; // AVX512 Conflict Detection
static int HW_AVX512PF; // AVX512 Prefetch
static int HW_AVX512ER; // AVX512 Exponential + Reciprocal
static int HW_AVX512VL; // AVX512 Vector Length Extensions
static int HW_AVX512BW; // AVX512 Byte + Word
static int HW_AVX512DQ; // AVX512 Doubleword + Quadword
static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add
static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions
// https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set
/*
 * Query CPUID and populate the HW_* capability flags above.
 * Leaf 1 carries the legacy feature bits, leaf 7 the AVX2/BMI/AVX-512
 * family, and extended leaf 0x80000001 the AMD-specific bits
 * (ABM, SSE4a, FMA4, XOP).  Not thread-safe; callers serialize through
 * the is_avx()/is_fma_avx2() lazy-init pattern.
 */
void check_cpu_features(void) {
int info[4];
cpuid(info, 0);
int nIds = info[0];
cpuid(info, 0x80000000);
unsigned nExIds = info[0];
// Detect Features
if (nIds >= 0x00000001) {
cpuid(info, 0x00000001);
HW_MMX = (info[3] & ((uint32_t)1 << 23)) != 0;
HW_SSE = (info[3] & ((uint32_t)1 << 25)) != 0;
HW_SSE2 = (info[3] & ((uint32_t)1 << 26)) != 0;
HW_SSE3 = (info[2] & ((uint32_t)1 << 0)) != 0;
HW_SSSE3 = (info[2] & ((uint32_t)1 << 9)) != 0;
HW_SSE41 = (info[2] & ((uint32_t)1 << 19)) != 0;
HW_SSE42 = (info[2] & ((uint32_t)1 << 20)) != 0;
HW_AES = (info[2] & ((uint32_t)1 << 25)) != 0;
HW_AVX = (info[2] & ((uint32_t)1 << 28)) != 0;
HW_FMA3 = (info[2] & ((uint32_t)1 << 12)) != 0;
HW_RDRAND = (info[2] & ((uint32_t)1 << 30)) != 0;
}
/* leaf 7: structured extended features (EBX/ECX bits) */
if (nIds >= 0x00000007) {
cpuid(info, 0x00000007);
HW_AVX2 = (info[1] & ((uint32_t)1 << 5)) != 0;
HW_BMI1 = (info[1] & ((uint32_t)1 << 3)) != 0;
HW_BMI2 = (info[1] & ((uint32_t)1 << 8)) != 0;
HW_ADX = (info[1] & ((uint32_t)1 << 19)) != 0;
HW_SHA = (info[1] & ((uint32_t)1 << 29)) != 0;
HW_PREFETCHWT1 = (info[2] & ((uint32_t)1 << 0)) != 0;
HW_AVX512F = (info[1] & ((uint32_t)1 << 16)) != 0;
HW_AVX512CD = (info[1] & ((uint32_t)1 << 28)) != 0;
HW_AVX512PF = (info[1] & ((uint32_t)1 << 26)) != 0;
HW_AVX512ER = (info[1] & ((uint32_t)1 << 27)) != 0;
HW_AVX512VL = (info[1] & ((uint32_t)1 << 31)) != 0;
HW_AVX512BW = (info[1] & ((uint32_t)1 << 30)) != 0;
HW_AVX512DQ = (info[1] & ((uint32_t)1 << 17)) != 0;
HW_AVX512IFMA = (info[1] & ((uint32_t)1 << 21)) != 0;
HW_AVX512VBMI = (info[2] & ((uint32_t)1 << 1)) != 0;
}
/* extended leaf 0x80000001: AMD feature bits */
if (nExIds >= 0x80000001) {
cpuid(info, 0x80000001);
HW_x64 = (info[3] & ((uint32_t)1 << 29)) != 0;
HW_ABM = (info[2] & ((uint32_t)1 << 5)) != 0;
HW_SSE4a = (info[2] & ((uint32_t)1 << 6)) != 0;
HW_FMA4 = (info[2] & ((uint32_t)1 << 16)) != 0;
HW_XOP = (info[2] & ((uint32_t)1 << 11)) != 0;
}
}
/*
 * Lazily detect AVX support: the first call runs CPUID detection and
 * prints whether AVX will be used; subsequent calls return the cached
 * result.  (Not thread-safe: the cache is an unsynchronized static.)
 */
int is_avx() {
    static int result = -1;
    if (result != -1) return result;
    check_cpu_features();
    result = HW_AVX;
    printf(result == 1 ? " Used AVX \n" : " Not used AVX \n");
    return result;
}
/*
 * Lazily detect FMA3 + AVX2 support: the first call runs CPUID detection
 * and prints the decision; subsequent calls return the cached result.
 * (Not thread-safe: the cache is an unsynchronized static.)
 */
int is_fma_avx2() {
    static int result = -1;
    if (result != -1) return result;
    check_cpu_features();
    result = HW_FMA3 && HW_AVX2;
    printf(result == 1 ? " Used FMA & AVX2 \n" : " Not used FMA & AVX2 \n");
    return result;
}
// https://software.intel.com/sites/landingpage/IntrinsicsGuide
/*
 * C += ALPHA * A * B, no transposition, row-major with leading
 * dimensions lda/ldb/ldc.  When the CPU reports AVX, the inner loop
 * processes 8 output columns per iteration with unaligned loads/stores
 * and a scalar tail; otherwise a plain scalar triple loop is used.
 */
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
if (is_avx() == 1) { // AVX
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
float A_PART = ALPHA*A[i*lda + k];
__m256 a256, b256, c256, result256; // AVX
a256 = _mm256_set1_ps(A_PART);
for (j = 0; j < N - 8; j += 8) {
b256 = _mm256_loadu_ps(&B[k*ldb + j]);
c256 = _mm256_loadu_ps(&C[i*ldc + j]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//result256 = _mm256_fmadd_ps(a256, b256, c256);
result256 = _mm256_mul_ps(a256, b256);
result256 = _mm256_add_ps(result256, c256);
_mm256_storeu_ps(&C[i*ldc + j], result256);
}
/* scalar tail: finish the last (up to 8) columns */
int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
for (j = prev_end; j < N; ++j)
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
else {
/* portable scalar fallback */
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
/* // SSE
__m128 a128, b128, c128, result128; // SSE
a128 = _mm_set1_ps(A_PART);
for (j = 0; j < N - 4; j += 4) {
b128 = _mm_loadu_ps(&B[k*ldb + j]);
c128 = _mm_loadu_ps(&C[i*ldc + j]);
//result128 = _mm_fmadd_ps(a128, b128, c128);
result128 = _mm_mul_ps(a128, b128);
result128 = _mm_add_ps(result128, c128);
_mm_storeu_ps(&C[i*ldc + j], result128);
}
int prev_end = (N % 4 == 0) ? (N - 4) : (N / 4) * 4;
for (j = prev_end; j < N; ++j){
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
*/
}
}
}
}
/*
 * Tiled AVX GEMM: C += ALPHA * A * B using TILE_M x TILE_N register
 * blocks (4 rows x 16 columns held in eight __m256 accumulators) over
 * TILE_K-deep strips, parallelized across row tiles with OpenMP.
 * Scalar remainder loops handle columns past a TILE_N multiple, depth
 * past a TILE_K multiple, and rows past a TILE_M multiple.
 */
void gemm_nn_fast(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i;
#pragma omp parallel for
for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
{
int j, k;
int i_d, k_d;
for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
{
for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N)
{
// L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB
// L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB
// L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB
__m256 result256;
__m256 a256_0, b256_0; // AVX
__m256 a256_1, b256_1; // AVX
__m256 a256_2;// , b256_2; // AVX
__m256 a256_3;// , b256_3; // AVX
__m256 c256_0, c256_1, c256_2, c256_3;
__m256 c256_4, c256_5, c256_6, c256_7;
/* load the 4x16 accumulator block of C */
c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]);
c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]);
c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]);
c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]);
c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]);
c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]);
c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]);
c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]);
for (k_d = 0; k_d < (TILE_K); ++k_d)
{
/* broadcast one scaled A element per row of the block */
a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]);
a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]);
a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]);
a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]);
b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]);
b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0);
//c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1);
//c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2);
//c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3);
//c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4);
//c256_5 = _mm256_fmadd_ps(a256_3, b256_0, c256_5);
//c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6);
//c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7);
result256 = _mm256_mul_ps(a256_0, b256_0);
c256_0 = _mm256_add_ps(result256, c256_0);
result256 = _mm256_mul_ps(a256_1, b256_0);
c256_1 = _mm256_add_ps(result256, c256_1);
result256 = _mm256_mul_ps(a256_0, b256_1);
c256_2 = _mm256_add_ps(result256, c256_2);
result256 = _mm256_mul_ps(a256_1, b256_1);
c256_3 = _mm256_add_ps(result256, c256_3);
result256 = _mm256_mul_ps(a256_2, b256_0);
c256_4 = _mm256_add_ps(result256, c256_4);
result256 = _mm256_mul_ps(a256_3, b256_0);
c256_5 = _mm256_add_ps(result256, c256_5);
result256 = _mm256_mul_ps(a256_2, b256_1);
c256_6 = _mm256_add_ps(result256, c256_6);
result256 = _mm256_mul_ps(a256_3, b256_1);
c256_7 = _mm256_add_ps(result256, c256_7);
}
/* write the accumulator block back */
_mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0);
_mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1);
_mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2);
_mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3);
_mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4);
_mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5);
_mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6);
_mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7);
}
/* scalar remainder: columns past the last full TILE_N block */
for (j = (N / TILE_N)*TILE_N; j < N; ++j) {
for (i_d = i; i_d < (i + TILE_M); ++i_d)
{
for (k_d = k; k_d < (k + TILE_K); ++k_d)
{
PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d];
C[i_d*ldc + j] += A_PART*B[k_d*ldb + j];
}
}
}
}
/* scalar remainder: depth past the last full TILE_K strip */
for (k = (K / TILE_K)*TILE_K; k < K; ++k)
{
for (i_d = i; i_d < (i + TILE_M); ++i_d)
{
PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k];
for (j = 0; j < N; ++j) {
C[i_d*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
/* scalar remainder: rows past the last full TILE_M block */
for (i = (M / TILE_M)*TILE_M; i < M; ++i) {
int j, k;
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
//printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s];
__m256i a256 = _mm256_set1_epi32(A_PART);
for (j = 0; j < N - 8; j += 8)
{
__m256i b256 = *((__m256i*)&B[s*ldb + j]);
__m256i xor256 = _mm256_xor_si256(a256, b256); // xnor = xor(a,b)
__m256i all_1 = _mm256_set1_epi8((char)255);
__m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))
// waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
__m256 count = _mm256_setr_ps(
popcnt_32(_mm256_extract_epi32(xnor256, 0)),
popcnt_32(_mm256_extract_epi32(xnor256, 1)),
popcnt_32(_mm256_extract_epi32(xnor256, 2)),
popcnt_32(_mm256_extract_epi32(xnor256, 3)),
popcnt_32(_mm256_extract_epi32(xnor256, 4)),
popcnt_32(_mm256_extract_epi32(xnor256, 5)),
popcnt_32(_mm256_extract_epi32(xnor256, 6)),
popcnt_32(_mm256_extract_epi32(xnor256, 7)));
__m256 val2 = _mm256_set1_ps(2);
count = _mm256_mul_ps(count, val2); // count * 2
__m256 val32 = _mm256_set1_ps(32);
count = _mm256_sub_ps(count, val32); // count - 32
__m256 mean256 = _mm256_set1_ps(mean_val);
count = _mm256_mul_ps(count, mean256); // count * mean_val
__m256 c256 = *((__m256*)&C[i*ldc + j]);
count = _mm256_add_ps(count, c256); // c = c + count
*((__m256*)&C[i*ldc + j]) = count;
}
for (; j < N; ++j) // out_h*out_w;
{
PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j];
uint32_t xnor_result = ~(A_PART ^ B_PART);
int32_t count = popcnt_32(xnor_result); // must be Signed int
C[i*ldc + j] += (2 * count - 32) * mean_val;
}
}
}
}
/*
 * Naive direct 2-D convolution (reference implementation, no SIMD).
 *
 * For each of the n filters, accumulates the ksize x ksize dot product
 * over all c input channels into output[filter][y][x] (+= into output,
 * which the caller pre-initializes).  `pad` shifts the receptive field;
 * taps that fall outside the input are skipped (zero padding).  The
 * `stride` argument is accepted for interface compatibility, but this
 * kernel always steps one pixel at a time (output size == input size).
 * Filters are processed in parallel with OpenMP.
 */
void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output)
{
    int fil;
    #pragma omp parallel for // parallelize across output filters
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, ky, kx;
        for (chan = 0; chan < c; ++chan) {
            const int weights_base = fil*c*ksize*ksize + chan*ksize*ksize;
            const int input_base = chan*w*h;
            for (y = 0; y < h; ++y) {
                for (x = 0; x < w; ++x) {
                    float acc = 0;
                    for (ky = 0; ky < ksize; ++ky) {
                        const int in_y = y + ky - pad;
                        if (in_y < 0 || in_y >= h) continue; /* zero padding */
                        for (kx = 0; kx < ksize; ++kx) {
                            const int in_x = x + kx - pad;
                            if (in_x < 0 || in_x >= w) continue; /* zero padding */
                            acc += input[input_base + in_y*w + in_x]
                                 * weights[weights_base + ky*ksize + kx];
                        }
                    }
                    output[fil*w*h + y*w + x] += acc;
                }
            }
        }
    }
}
/*
 * Experimental XNOR-style AVX convolution.  First pass strips every
 * weight down to its sign bit IN-PLACE (AND with 0x80000000 masks), then
 * the main loop accumulates _mm256_xor_ps(weight_sign, input) vectors for
 * 8 horizontally adjacent outputs at a time and scales by |mean[fil]|,
 * OVERWRITING output (not accumulating).  Much of the body is left-over
 * commented experimentation.
 * NOTE(review): the x-loop runs only to w-8 and horizontal taps are read
 * unclamped, so the rightmost columns are never written and edge taps
 * read past the row; also ksize*ksize*n*c and the *(( __m256*)...) casts
 * assume 8-float multiples and suitable alignment -- confirm callers.
 */
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output, float *mean)
{
//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
int i;
#if defined(_OPENMP)
static int max_num_threads = 0;
if (max_num_threads == 0) {
max_num_threads = omp_get_max_threads();
//omp_set_num_threads( max_num_threads / 2);
}
#endif
//convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output);
/* keep only the sign bit of every weight (destructive) */
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
for (i = 0; i < ksize*ksize*n*c; i+=8) {
*((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1));
}
//for (i = 0; i < w*h*c; i += 8) {
//*((__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1));
//}
//__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF);
//all256_last_zero.m256i_i32[7] = 0;
__m256i all256_last_zero =
_mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0);
__m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
//__m256 all256_sing1 = _mm256_set1_ps(0x80000000);
__m256 all256_one = _mm256_set1_ps(1);
__m256i all256i_one = _mm256_set1_epi32(1);
///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
int chan, y, x, f_y, f_x;
float cur_mean = fabs(mean[fil]);
__m256 mean256 = _mm256_set1_ps(cur_mean);
// channel index
//for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w-8; x+=8)
{
int const output_index = fil*w*h + y*w + x;
float sum = 0;
__m256 sum256 = _mm256_set1_ps(0);
for (chan = 0; chan < c; ++chan) {
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
//__m256 in = *((__m256*)&input[input_pre_index + input_y*w]);
if (input_y < 0 || input_y >= h) continue;
//__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]);
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
//if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
//if (input_y < 0 || input_y >= h) continue;
//sum += input[input_index] * weights[weights_index];
__m256 in = *((__m256*)&input[input_index]);
__m256 w = _mm256_set1_ps(weights[weights_index]);
//__m256 w_sign = _mm256_and_ps(w, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats
__m256 xor256 = _mm256_xor_ps(w, in);
//printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]);
//printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]);
//__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256);
//sum256 = xor256;
sum256 = _mm256_add_ps(xor256, sum256);
//printf("\n --- \n");
//printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]);
if (f_x < ksize-1) {
//in = _mm256_permutevar8x32_ps(in, idx256);
//in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero));
}
}
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
//output[output_index] += sum;
sum256 = _mm256_mul_ps(sum256, mean256);
//printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n",
// cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]);
//__m256 out = *((__m256*)&output[output_index]);
//out = _mm256_add_ps(out, sum256);
//*((__m256*)&output[output_index]) = out;
*((__m256*)&output[output_index]) = sum256;
//_mm256_storeu_ps(&C[i*ldc + j], result256);
}
}
}
// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
/*
 * Population count of a 128-bit register as the sum of two 64-bit
 * hardware popcnts on its low and high halves (see the Mula/Kurz/Lemire
 * paper linked above).
 */
static inline int popcnt128(__m128i n) {
const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#if defined(_MSC_VER)
return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#elif defined(__APPLE__) && defined(__clang__)
return _mm_popcnt_u64(_mm_cvtsi128_si64(n)) + _mm_popcnt_u64(_mm_cvtsi128_si64(n_hi));
#else
return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}
/* Population count of a 256-bit register via its two 128-bit halves. */
static inline int popcnt256(__m256i n) {
    int total = popcnt128(_mm256_extractf128_si256(n, 0));
    total += popcnt128(_mm256_extractf128_si256(n, 1));
    return total;
}
/*
 * Per-lane popcount of a 256-bit register (Mula's method): look up the
 * 4-bit popcount of every low and high nibble with PSHUFB, add the byte
 * counts, then PSADBW sums them into the four 64-bit lanes of the result.
 */
static inline __m256i count256(__m256i v) {
__m256i lookup =
_mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4);
__m256i low_mask = _mm256_set1_epi8(0x0f);
__m256i lo = _mm256_and_si256(v, low_mask);
__m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
__m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
__m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
__m256i total = _mm256_add_epi8(popcnt1, popcnt2);
return _mm256_sad_epu8(total, _mm256_setzero_si256());
}
/*
 * Full popcount of a 256-bit register: per-lane counts from count256()
 * (Mula's nibble-LUT method) followed by a horizontal sum of the four
 * 64-bit lanes.
 */
static inline int popcnt256_custom(__m256i n) {
    __m256i lane_counts = count256(n);
    long long total = _mm256_extract_epi64(lane_counts, 0);
    total += _mm256_extract_epi64(lane_counts, 1);
    total += _mm256_extract_epi64(lane_counts, 2);
    total += _mm256_extract_epi64(lane_counts, 3);
    return (int)total;
}
/* XNOR of two 256-bit bit vectors followed by a per-lane popcount that is
 * accumulated into *count_sum (first half of Mula's popcount scheme;
 * get_count_mula() performs the final reduction). */
static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) {
    const __m256i all_ones = _mm256_set1_epi8((char)255);
    /* xnor(a, b) == not(xor(a, b)); andnot against all-ones supplies the NOT.
     * (As the original noted, the NOT could be hoisted by pre-inverting the
     * weights once.) */
    const __m256i xnor_bits = _mm256_andnot_si256(_mm256_xor_si256(a_bit256, b_bit256), all_ones);
    *count_sum = _mm256_add_epi64(count256(xnor_bits), *count_sum);
}
// 2nd part - popcnt Mula's algorithm
static inline int get_count_mula(__m256i count_sum) {
return _mm256_extract_epi64(count_sum, 0)
+ _mm256_extract_epi64(count_sum, 1)
+ _mm256_extract_epi64(count_sum, 2)
+ _mm256_extract_epi64(count_sum, 3);
}
// 5x times faster than gemm()-float32
// further optimizations: do mean-mult only for the last layer
/*
 AVX2 binary (XNOR-net) GEMM with pre-transposed B.
 A: M x K packed bit rows (lda = bit stride), one row per filter.
 B: N x K packed bit rows (ldb = bit stride), pre-transposed input patches.
 C[i][j] = (2 * popcount(xnor(A_row_i, B_row_j)) - K) * mean_arr[i],
 i.e. the +1/-1 dot product of K bits scaled by the per-filter mean.
 Computes a 2x2 tile of C per inner iteration so every 256-bit load of A
 and B is reused twice; odd-N and odd-M remainders are handled by the
 scalar-tile tail loops below.  ALPHA_UNUSED is ignored.
*/
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#if defined(_OPENMP)
// query the thread count once (cached in a function-local static)
static int max_num_threads = 0;
if (max_num_threads == 0) {
max_num_threads = omp_get_max_threads();
//omp_set_num_threads(max_num_threads / 2);
}
#endif
//#pragma omp parallel for
//for (i = 0; i < M; ++i)
#pragma omp parallel for
for (i = 0; i < (M/2)*2; i += 2)
{ // l.n - filters [16 - 55 - 1024]
float mean_val_0 = mean_arr[i + 0];
float mean_val_1 = mean_arr[i + 1];
int j, k;
//__m256i all_1 = _mm256_set1_epi8(255);
//for (j = 0; j < N; ++j)
for (j = 0; j < (N/2)*2; j += 2)
{ // out_h*out_w - one channel output size [169 - 173056]
//int count = 0;
const int bit_step = 256;
// four xnor-popcount accumulators, one per element of the 2x2 C tile
__m256i count_sum_0 = _mm256_set1_epi8(0);
__m256i count_sum_1 = _mm256_set1_epi8(0);
__m256i count_sum_2 = _mm256_set1_epi8(0);
__m256i count_sum_3 = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
// 256 bits from two A rows and two B rows; /8 converts bit offset to bytes
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
__m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8));
__m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0);
xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1);
xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2);
xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3);
//count += popcnt256(c_bit256);
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
int count_0 = get_count_mula(count_sum_0);
int count_1 = get_count_mula(count_sum_1);
int count_2 = get_count_mula(count_sum_2);
int count_3 = get_count_mula(count_sum_3);
// f1 = number of padding bits in the last partial 256-bit chunk
// (presumably the padding xnors to set bits, hence the subtraction — the
// original note below says these come from alignment-only empty space)
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count_0 = count_0 - f1; // remove extra bits (from empty space for align only)
count_1 = count_1 - f1;
count_2 = count_2 - f1;
count_3 = count_3 - f1;
C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0;
C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0;
C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1;
C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1;
}
// tail for odd N: remaining column handled one at a time for both rows
int i_d;
for (i_d = 0; i_d < 2; ++i_d)
{
float mean_val = mean_arr[i + i_d];
for (j = (N / 2) * 2; j < N; j += 1)
{ // out_h*out_w - one channel output size [169 - 173056]
const int bit_step = 256;
__m256i count_sum = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
}
int count = get_count_mula(count_sum);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val;
}
}
}
// tail for odd M: last row of C computed without 2x2 tiling
for (i = (M / 2) * 2; i < M; i += 1)
{
float mean_val = mean_arr[i];
int j, k;
for (j = 0; j < N; j += 1)
{ // out_h*out_w - one channel output size [169 - 173056]
const int bit_step = 256;
__m256i count_sum = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
}
int count = get_count_mula(count_sum);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
/*
 im2col variant writing a transposed & aligned output layout:
 col_index = (h * width_col + w) * ldb_align + c.
 The fast path covers the size-preserving stride==1 / pad==1 case: interior
 pixels are loaded 8 at a time and scattered to 8 consecutive output rows;
 the four image borders use the scalar im2col_get_pixel() (which handles
 zero padding).  Any other configuration uses the generic scalar loop.

 FIX(review): the vector loop bound was `width_col - pad - 4` while the loop
 advances by 8 and reads/writes 8 elements per iteration, so the final
 iteration could read past the end of the image row and store up to 3
 positions beyond (width_col - pad).  Tightened to `- 8` to match
 im2col_cpu_custom(); the scalar remainder loop picks up the skipped
 pixels, so in-range results are unchanged.
*/
void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    int c;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior rows: no padding involved, direct copies from data_im
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    // one 8-float load, scattered to 8 output rows (row stride ldb_align)
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);
                    data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);
                    data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);
                    data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);
                    data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);
                    data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);
                    data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);
                    data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);
                }
                // scalar remainder of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }
            // left column (w == 0): may sample the zero-padding region
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            // right column (w == width_col - 1)
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            // top row (h == 0)
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            // bottom row (h == height_col - 1)
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        // generic configuration: fully scalar, every pixel via im2col_get_pixel()
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = 0; h < height_col; ++h) {
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h * stride;
                    int im_col = w_offset + w * stride;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
/*
 im2col with an AVX fast path.  Unfolds data_im (channels x height x width)
 into data_col with layout col_index = (c * height_col + h) * width_col + w.
 The vectorized path applies only to the size-preserving stride==1 / pad==1
 case on FMA/AVX2 hardware: interior pixels are copied 8 floats at a time
 and the four image borders are filled by the scalar im2col_get_pixel()
 (which handles zero padding).  Everything else falls back to the generic
 im2col_cpu().
*/
void im2col_cpu_custom(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
// interior rows: direct copies from data_im, 8 floats per step
for (h = pad; h < height_col-pad; ++h) {
for (w = pad; w < width_col-pad-8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
_mm256_storeu_ps(&data_col[col_index], src256);
}
// scalar remainder of the interior row
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
// left column (w == 0): may sample the zero-padding region
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
// right column (w == width_col - 1)
{
w = width_col-1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
// top row (h == 0)
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
// bottom row (h == height_col - 1)
{
h = height_col-1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
//printf("\n Error: is no non-optimized version \n");
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
/*
 im2col into an aligned layout: each of the channels_col rows starts at a
 fixed stride of bit_align floats, col_index = c * bit_align + h*width_col + w.
 AVX fast path only for the size-preserving stride==1 / pad==1 case on
 FMA/AVX2 hardware (interior copied 8 floats at a time, borders via the
 scalar im2col_get_pixel()).  There is intentionally no generic fallback:
 callers are expected to pre-check the configuration (see the comment in
 the else branch).
*/
void im2col_cpu_custom_align(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
int new_ldb = bit_align; // aligned row stride (in floats) of data_col
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
// interior rows: direct copies, 8 floats per step
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
_mm256_storeu_ps(&data_col[col_index], src256);
}
// scalar remainder of the interior row
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
// left column (w == 0): may sample the zero-padding region
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
// right column (w == width_col - 1)
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
// top row (h == 0)
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
// bottom row (h == height_col - 1)
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
/*
 Binarizing im2col: instead of copying float patch values it stores one bit
 per pixel into data_col (bit set iff value > 0), bit index
 col_index = c * bit_align + h*width_col + w.  AVX fast path only for the
 size-preserving stride==1 / pad==1 case on FMA/AVX2 hardware: 8 pixels are
 compared against zero at once and the resulting 8-bit sign mask is OR-ed
 into the output bit stream; borders go through im2col_get_pixel() +
 set_bit().  No generic fallback (callers must pre-check configuration).
 NOTE(review): the 16-bit |= below is a non-atomic read-modify-write that
 can touch the first byte of the next channel's bit row; if bit_align does
 not keep channel rows on sufficiently separated byte boundaries this can
 race between OpenMP threads — verify bit_align guarantees at call sites.
*/
void im2col_cpu_custom_bin(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
// NOTE(review): all256_sing1 is unused by the active code path (left over
// from the commented-out sign-bit variant below)
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 float_zero256 = _mm256_set1_ps(0.00);
int new_ldb = bit_align; // bit stride between channel rows of data_col
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
// interior rows: 8 pixels binarized per step via compare + movemask
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
//__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
//uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
//mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
__m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1
// OR the 8-bit mask into the bit stream at bit position col_index
uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8];
*dst_ptr |= (mask << (col_index % 8));
}
// scalar remainder of the interior row
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
// left column (w == 0): may sample the zero-padding region
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
// right column (w == width_col - 1)
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
// top row (h == 0)
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
// bottom row (h == height_col - 1)
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
/* Apply activation `a` to x[0..n-1] in place.  LINEAR is a no-op; LEAKY
 * (x > 0 ? x : 0.1*x) has an AVX2 fast path that selects x or 0.1*x per
 * lane from the sign bit; every other activation falls back to the scalar
 * activate() helper. */
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i = 0;
    if (a == LINEAR)
    {
        /* identity: nothing to do */
    }
    else if (a == LEAKY)
    {
        if (is_fma_avx2()) {
            const __m256i sign_mask = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
            const __m256 tenth = _mm256_set1_ps(0.1F);
            for (i = 0; i < n - 8; i += 8) {
                const __m256 vals = _mm256_loadu_ps(&x[i]);
                const __m256 scaled = _mm256_mul_ps(vals, tenth); // 0.1 * x
                /* isolate sign bits; blendv picks `scaled` where the sign bit is set (negative lanes) */
                const __m256i signs = _mm256_and_si256(_mm256_castps_si256(vals), sign_mask);
                const __m256 picked = _mm256_blendv_ps(vals, scaled, _mm256_castsi256_ps(signs));
                _mm256_storeu_ps(&x[i], picked);
            }
        }
        /* scalar tail (or the whole array when AVX2 is unavailable) */
        for (; i < n; ++i) {
            x[i] = (x[i]>0) ? x[i] : .1*x[i];
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
size_t dst_size = size / 8 + 1;
memset(dst, 0, dst_size);
size_t i;
//__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 float_zero256 = _mm256_set1_ps(0.0);
for (i = 0; i < size; i+=8)
{
//__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
//__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
//uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
__m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
__m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1
dst[i / 8] = mask;
}
}
/* Transpose one 4x4 float tile: reads four rows of A (row stride lda)
 * and writes the transposed tile into B (row stride ldb).
 * Unaligned loads/stores, so any strides are acceptable. */
static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb)
{
    __m128 r0 = _mm_loadu_ps(A + 0 * lda);
    __m128 r1 = _mm_loadu_ps(A + 1 * lda);
    __m128 r2 = _mm_loadu_ps(A + 2 * lda);
    __m128 r3 = _mm_loadu_ps(A + 3 * lda);
    _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
    _mm_storeu_ps(B + 0 * ldb, r0);
    _mm_storeu_ps(B + 1 * ldb, r1);
    _mm_storeu_ps(B + 2 * ldb, r2);
    _mm_storeu_ps(B + 3 * ldb, r3);
}
/* Transpose an n x m matrix A (row stride lda) into B (row stride ldb).
 * Full block_size x block_size tiles go through 4x4 SSE transposes
 * (block_size is expected to be a multiple of 4); partial tiles at the
 * right and bottom edges fall back to a scalar element-by-element copy.
 * Tiles are independent, so the outer stripe loop is OpenMP-parallel. */
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int row;
    #pragma omp parallel for
    for (row = 0; row < n; row += block_size) {
        int col, r, c;
        if (row + block_size < n) {
            const int row_end = row + block_size;
            for (col = 0; col < m; col += block_size) {
                if (col + block_size < m) {
                    /* full tile: 4x4 SSE sub-transposes */
                    const int col_end = col + block_size;
                    for (r = row; r < row_end; r += 4) {
                        for (c = col; c < col_end; c += 4) {
                            transpose4x4_SSE(&A[r*lda + c], &B[c*ldb + r], lda, ldb);
                        }
                    }
                }
                else {
                    /* partial tile on the right edge: scalar copy */
                    for (r = row; r < row_end; ++r) {
                        for (c = col; c < m; ++c) {
                            B[c*ldb + r] = A[r*lda + c];
                        }
                    }
                }
            }
        }
        else {
            /* partial stripe at the bottom: scalar copy of remaining rows */
            for (r = row; r < n; ++r) {
                for (c = 0; c < m; ++c) {
                    B[c*ldb + r] = A[r*lda + c];
                }
            }
        }
    }
}
/*
 Max-pooling forward pass with SIMD fast paths.
 src:  input  (batch x c x h x w), dst: output (batch x c x out_h x out_w).
 indexes, when non-NULL, records the argmax input index per output element —
 NOTE(review): only the scalar remainder path fills `indexes`; both vector
 paths leave it untouched for the elements they produce.  Verify callers
 only request indexes (training) on configurations that hit the scalar path.
 Pooling window is size x size with the usual darknet offset -pad/2.
 Channels are processed in parallel via OpenMP.
*/
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
int pad, int stride, int batch)
{
const int w_offset = -pad / 2;
const int h_offset = -pad / 2;
int b, k;
for (b = 0; b < batch; ++b) {
#pragma omp parallel for
for (k = 0; k < c; ++k) {
int i, j, m, n;
for (i = 0; i < out_h; ++i) {
//for (j = 0; j < out_w; ++j) {
j = 0;
// stride == 1: 8 adjacent output pixels share overlapping windows, so one
// 8-wide running max over the window covers all of them at once
if(stride == 1 && is_avx() == 1) {
for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
int out_index = j + out_w*(i + out_h*(k + c*b));
__m256 max256 = _mm256_set1_ps(-FLT_MAX);
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
if (!valid) continue;
__m256 src256 = _mm256_loadu_ps(&src[index]);
max256 = _mm256_max_ps(src256, max256);
}
}
_mm256_storeu_ps(&dst[out_index], max256);
}
}
// 2x2/stride-2: load 8 inputs covering 4 output columns, reduce pairs
// NOTE(review): relies on the specific permute/shuffle controls below to
// pair adjacent lanes — confirm against a scalar reference before editing
else if (size == 2 && stride == 2 && is_avx() == 1) {
for (j = 0; j < out_w - 4; j += 4) {
int out_index = j + out_w*(i + out_h*(k + c*b));
//float max = -FLT_MAX;
//int max_i = -1;
__m128 max128 = _mm_set1_ps(-FLT_MAX);
for (n = 0; n < size; ++n) {
//for (m = 0; m < size; ++m)
m = 0;
{
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
if (!valid) continue;
__m256 src256 = _mm256_loadu_ps(&src[index]);
__m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
__m256 max256 = _mm256_max_ps(src256, src256_2);
__m128 src128_0 = _mm256_extractf128_ps(max256, 0);
__m128 src128_1 = _mm256_extractf128_ps(max256, 1);
__m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));
max128 = _mm_max_ps(src128, max128);
}
}
_mm_storeu_ps(&dst[out_index], max128);
}
}
// scalar remainder (and the only path that records argmax indexes)
for (; j < out_w; ++j) {
int out_index = j + out_w*(i + out_h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
float val = (valid != 0) ? src[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;  // out-of-window taps count as -FLT_MAX
max = (val > max) ? val : max;
}
}
dst[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
}
}
}
}
#else // AVX
/* Non-AVX build stub: AVX support is compiled out, so always report 0. */
int is_avx() {
return 0;
}
/* Non-AVX build stub: FMA/AVX2 support is compiled out, so always report 0. */
int is_fma_avx2() {
return 0;
}
/* C += ALPHA * A * B for row-major matrices: A is MxK (row stride lda),
 * B is KxN (row stride ldb), C is MxN (row stride ldc).  C is accumulated
 * into, not overwritten.  The i-k-j loop order streams through B and C
 * rows sequentially with the scaled A element held in a local. */
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int row, inner, col;
    for (row = 0; row < M; ++row) {
        for (inner = 0; inner < K; ++inner) {
            const float a_scaled = ALPHA * A[row * lda + inner];
            for (col = 0; col < N; ++col) {
                C[row * ldc + col] += a_scaled * B[inner * ldb + col];
            }
        }
    }
}
/* Same contract as gemm_nn() (C += ALPHA * A * B, row-major), with the
 * outer row loop parallelized via OpenMP — each thread owns disjoint rows
 * of C, so no synchronization is needed. */
void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {
        int k;
        for (k = 0; k < K; ++k) {
            const float scaled_a = ALPHA * A[i*lda + k];
            int j;
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += scaled_a * B[k*ldb + j];
            }
        }
    }
}
/* Binary GEMM on 32-bit packed words: each uint32 of A (MxK, stride lda)
 * and B (KxN, stride ldb) holds 32 binary weights/activations.
 * xnor + popcount yields the number of matching bits per word; the +1/-1
 * dot-product contribution is (2*count - 32), scaled by the per-filter
 * mean.  C is accumulated into.  ALPHA is unused. */
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {                   // filters (l.n)
        const float mean_val = mean_arr[i];
        int s;
        for (s = 0; s < K; ++s) {               // packed 32-bit words per filter
            const uint32_t a_word = A[i * lda + s];
            int j;
            for (j = 0; j < N; ++j) {           // out_h*out_w
                const uint32_t b_word = B[s * ldb + j];
                const uint32_t xnor_result = ~(a_word ^ b_word);
                const int32_t count = popcnt_32(xnor_result); // must be Signed int
                C[i*ldc + j] += (2 * count - 32) * mean_val;
            }
        }
    }
}
/* Plain (non-AVX) 2-D convolution.
 * output[fil][y][x] += sum over channels and the ksize x ksize window of
 * input * weights, with out-of-image taps skipped (zero padding).
 * The output is addressed as fil*w*h + y*w + x and the spatial loops run
 * over the full input, so stride is effectively treated as 1 here (out_h/
 * out_w are computed but unused).  `mean` is accepted but never read in
 * this path.  Output is accumulated into, not overwritten.  Filters are
 * independent, so the filter loop is OpenMP-parallel. */
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    const int out_h = (h + 2 * pad - ksize) / stride + 1; // == h for stride=1, pad=1
    const int out_w = (w + 2 * pad - ksize) / stride + 1; // == w for stride=1, pad=1
    int fil;
    // one independent output map per filter
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, ky, kx;
        for (chan = 0; chan < c; ++chan) {
            const int in_base = chan * w * h;
            const int wt_base = fil * c * ksize * ksize + chan * ksize * ksize;
            for (y = 0; y < h; ++y) {
                for (x = 0; x < w; ++x) {
                    float acc = 0;
                    for (ky = 0; ky < ksize; ++ky) {
                        const int in_y = y + ky - pad;
                        if (in_y < 0 || in_y >= h) continue; // row outside image: zero padding
                        for (kx = 0; kx < ksize; ++kx) {
                            const int in_x = x + kx - pad;
                            if (in_x < 0 || in_x >= w) continue;
                            acc += input[in_base + in_y*w + in_x] * weights[wt_base + ky*ksize + kx];
                        }
                    }
                    // l.output[filters][height][width] +=
                    //     state.input[channels][height][width] * l.weights[...]
                    output[fil*w*h + y*w + x] += acc;
                }
            }
        }
    }
}
/* Portable 64-bit population count: MSVC intrinsics on Windows,
 * GCC/Clang builtins elsewhere.  32-bit targets split the word into two
 * 32-bit popcounts (the first call truncates to the low half). */
static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32 // Windows
#ifdef _WIN64 // Windows 64-bit
    const int bits = __popcnt64(val64);
#else // Windows 32-bit
    const int bits = __popcnt(val64) + __popcnt(val64 >> 32);
#endif
#else // Linux
#if defined(__x86_64__) || defined(__aarch64__) // Linux 64-bit
    const int bits = __builtin_popcountll(val64);
#else // Linux 32-bit
    const int bits = __builtin_popcount(val64) + __builtin_popcount(val64 >> 32);
#endif
#endif
    return bits;
}
/* Scalar (non-AVX) binary GEMM, 64 bits at a time.
 * A: M x K packed bit rows (lda = bit stride); B: N x K packed bit rows
 * (ldb = bit stride, pre-transposed).  For each output element the +1/-1
 * dot product over K bits is (2 * popcount(xnor) - K), scaled by the
 * per-filter mean.  Padding bits in the final partial 64-bit word are
 * subtracted back out of the count. */
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {                  // filters (l.n)
        const float mean_val = mean_arr[i];
        int j;
        for (j = 0; j < N; ++j) {              // out_h*out_w
            int count = 0;
            int k;
            for (k = 0; k < K; k += 64) {      // filter bits, 64 per step
                const uint64_t a_bits = *((uint64_t *)(A + (i*lda + k) / 8));
                const uint64_t b_bits = *((uint64_t *)(B + (j*ldb + k) / 8));
                int matches = popcnt_64(xnor_int64(a_bits, b_bits));
                // final partial word: discard alignment-padding bits
                if (K - k < 64) matches = matches - (64 - (K - k));
                count += matches;
            }
            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}
/* Non-AVX build stub: the transposed im2col variant requires AVX; warn and
   return without touching data_col. */
void im2col_cpu_custom_transpose(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int ldb_align)
{
printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
/* Non-AVX build: delegate straight to the generic im2col_cpu().
 * FIX(review): the original body contained a complete "optimized" loop nest
 * after an unconditional `return`, i.e. unreachable dead code (which also
 * held a duplicated remainder loop); it has been removed with no behavior
 * change. */
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Binary im2col: same traversal as the float im2col above, but instead of
// storing float pixels it sets bit `col_index` in data_col whenever the
// source pixel is > 0.  Each of the channels_col rows of the column matrix
// is padded to bit_align bits (new_ldb) so the packed rows stay aligned for
// the subsequent bit-transpose.
// Only the stride == 1 / pad == 1 / "same" spatial-size case is
// implemented; any other configuration is reported as an error.
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        int new_ldb = bit_align;    // bit stride of one row of the packed column matrix

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;

            // Interior pixels: always in bounds, read the image directly.
            for (h = pad; h < height_col - pad; ++h) {
                // NOTE(review): the "- 8" split looks like a leftover of a
                // SIMD main loop; both loops now do identical scalar work.
                for (w = pad; w < width_col - pad - 8; w += 1) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }

            // Border pixels go through im2col_get_pixel(), which handles padding.
            {   // left column
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }

            {   // right column
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }

            {   // top row
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }

            {   // bottom row
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
        }
    }
    else {
        // Fixed garbled error message ("is no non-optimized version").
        printf("\n Error: there is no non-optimized version of im2col_cpu_custom_bin() \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}
// Apply activation `a` in place to the n-element array x.
// LINEAR is the identity and is skipped entirely; LEAKY uses the
// hard-coded 0.1 negative slope; every other activation falls back to
// the generic scalar activate() helper.
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int idx;

    if (a == LINEAR) {
        return;  // identity: nothing to do
    }

    if (a == LEAKY) {
        for (idx = 0; idx < n; ++idx) {
            if (!(x[idx] > 0)) x[idx] = .1*x[idx];
        }
        return;
    }

    for (idx = 0; idx < n; ++idx) {
        x[idx] = activate(x[idx], a);
    }
}
// Pack `size` floats into a bit array: bit i of dst is set iff src[i] > 0.
// Bits are stored LSB-first within each byte (bit i lands in dst[i / 8]
// at position i % 8).  dst must provide at least size / 8 + 1 bytes; that
// whole region is zeroed first.
//
// Fix: the previous implementation staged the flags in a temporary
// xcalloc(size) byte array and packed it 8 bytes at a time, reading
// byte_arr[i + 0 .. i + 7] even when size is not a multiple of 8 -- a
// heap over-read of up to 7 bytes.  Setting the destination bits directly
// removes the out-of-bounds read and the temporary allocation; the output
// is bit-for-bit identical.
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);

    size_t i;
    for (i = 0; i < size; ++i) {
        if (src[i] > 0) dst[i / 8] |= (unsigned char)(1u << (i % 8));
    }
}
// Transpose one block_size x block_size tile: B = A^T, where A has row
// stride lda and B has row stride ldb.  Scalar helper for the blocked
// transpose below.
static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size)
{
    int row, col;
    for (row = 0; row < block_size; row++) {
        for (col = 0; col < block_size; col++) {
            B[col*ldb + row] = A[row*lda + col];
        }
    }
}
// Blocked out-of-place transpose: B (stride ldb) = A^T for an n x m
// matrix A (stride lda).  block_size x block_size tiles are transposed
// one at a time for cache friendliness; edge tiles are clipped to the
// matrix bounds.  Tile rows are processed in parallel.
// NOTE(review): no SIMD intrinsics here despite the name; this appears
// to be the non-AVX fallback (see the #endif // AVX below) -- confirm.
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += block_size) {
        int j;
        for (j = 0; j < m; j += block_size) {
            const int i_end = (i + block_size < n) ? i + block_size : n;
            const int j_end = (j + block_size < m) ? j + block_size : m;
            int r, s;
            for (r = i; r < i_end; ++r) {
                for (s = j; s < j_end; ++s) {
                    B[s*ldb + r] = A[r*lda + s];
                }
            }
        }
    }
}
// Max-pooling forward pass (scalar path).  src is laid out as
// [batch][c][h][w]; dst and indexes as [batch][c][out_h][out_w].  For
// every output cell the maximum over a size x size window (window origin
// shifted by -pad/2) is written to dst, and the flat src index of that
// maximum to indexes when indexes is non-NULL (used by the backward
// pass).  Out-of-bounds window taps contribute -FLT_MAX.  Channels are
// processed in parallel.
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;
    int b, k;

    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int oy, ox, fy, fx;
            for (oy = 0; oy < out_h; ++oy) {
                for (ox = 0; ox < out_w; ++ox) {
                    const int out_index = ox + out_w*(oy + out_h*(k + c*b));
                    float best = -FLT_MAX;
                    int best_index = -1;
                    for (fy = 0; fy < size; ++fy) {
                        for (fx = 0; fx < size; ++fx) {
                            const int cur_h = h_offset + oy*stride + fy;
                            const int cur_w = w_offset + ox*stride + fx;
                            const int index = cur_w + w*(cur_h + h*(k + b*c));
                            const int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            const float val = valid ? src[index] : -FLT_MAX;
                            if (val > best) {
                                best = val;
                                best_index = index;
                            }
                        }
                    }
                    dst[out_index] = best;
                    if (indexes) indexes[out_index] = best_index;
                }
            }
        }
    }
}
#endif // AVX
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
// Repack a [c][h][w] float tensor into 32-channel interleaved groups:
// within every band of 32 channels, the 32 channel values of each spatial
// position are stored contiguously:
//   re_packed_input[chan*w*h + pixel*32 + c_pack] = input[(chan+c_pack)*w*h + pixel]
// This layout lets a later float_to_bit() pack the 32 channel bits of one
// pixel into a single uint32.
//
// Fix: the inner loop previously always read 32 channels per band, so a
// channel count that is not a multiple of 32 read past the end of
// `input`.  The loop is now clipped to the real channel count; for
// c % 32 == 0 the output is unchanged.
void repack_input(float *input, float *re_packed_input, int w, int h, int c)
{
    const int items_per_channel = w * h;
    int chan, i;
    for (chan = 0; chan < c; chan += 32)
    {
        for (i = 0; i < items_per_channel; ++i)
        {
            int c_pack;
            for (c_pack = 0; c_pack < 32 && chan + c_pack < c; ++c_pack) {
                float src = input[(chan + c_pack)*items_per_channel + i];
                re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
            }
        }
    }
}
// Transpose a src_h x src_w matrix of uint32 words.  src rows have a
// stride of src_align words; dst rows have a stride of dst_align / 32
// words (dst_align is given in bits and is expected to be a multiple of
// 32: bit_align rows in, 256-bit-aligned columns out).
void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
    const int dst_stride = dst_align / 32;
    int row;
    for (row = 0; row < src_h; ++row)      // l.size*l.size*l.c rows
    {
        int col;
        for (col = 0; col < src_w; ++col)  // out_h*out_w columns
        {
            dst[col*dst_stride + row] = src[row*src_align + col];
        }
    }
}
// XNOR GEMM on bit-packed operands: C[M x N] += the xnor-popcount dot
// product of 32-bit packed rows of A against 32-bit packed rows of the
// already-transposed B, scaled per output row by mean_arr.  Each packed
// word contributes (2*popcount(~(a ^ b)) - 32), the standard {-1,+1}
// binary-network dot-product identity.  Output rows run in parallel.
void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {          // output row (l.n)
        const float scale = mean_arr[i];
        int j;
        for (j = 0; j < N; ++j) {      // output column (out_h*out_w)
            float acc = 0;
            int s;
            for (s = 0; s < K; ++s) {  // packed 32-bit words along the dot product
                uint32_t a_bits = A[i*lda + s];
                uint32_t b_bits = B[j*ldb + s];
                uint32_t same = ~(a_bits ^ b_bits);   // bits where A and B agree
                int32_t count = popcnt_32(same);      // must stay signed
                acc += (2 * count - 32) * scale;
            }
            C[i*ldc + j] += acc;
        }
    }
}
// Direct binary (XNOR) convolution on channel-packed data.
// packed_input holds 32 input channels per uint32 per pixel (produced by
// repack_input + float_to_bit); packed_weights holds 32 channels per
// uint32 per filter tap, with each filter's weights padded to new_lda
// bits.  Every tap/band contributes (2*popcount(~(in ^ w)) - 32) *
// mean_arr[fil] -- the {-1,+1} dot-product identity -- accumulated into
// output[fil][y][x].  Filters are processed in parallel.
// NOTE(review): loops over c / 32 full bands, so any trailing c % 32
// channels are silently ignored -- confirm callers pad c to 32.
void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output,
    int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr)
{
    int fil;
    // filter index
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        float mean_val = mean_arr[fil];  // per-filter scaling factor
        int chan, y, x, f_y, f_x;  // c_pack
        // channel index (one iteration per band of 32 packed channels)
        for (chan = 0; chan < c / 32; ++chan)
            //for (chan = 0; chan < l.c; chan += 32)
            //for (c_pack = 0; c_pack < 32; ++c_pack)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    float sum = 0;
                    // filter - y
                    for (f_y = 0; f_y < size; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < size; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            // zero-padding: taps outside the image contribute nothing
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
                            // normal
                            //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x];
                            //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x];
                            // packed
                            //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                            //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                            //sum += input * weight;
                            //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                            //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                            //uint32_t bit1 = input > 0;
                            //uint32_t bit2 = weight > 0;
                            //uint32_t count = (~(bit1 ^ bit2)) & 1;
                            //float result = (2 * (float)count - 1) * mean_val;
                            //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result);
                            //sum += result;
                            // 32 channel bits of one input pixel / one weight tap
                            uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x];
                            //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x];
                            uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x];
                            uint32_t xnor_result = ~(input ^ weight);
                            int32_t count = popcnt_32(xnor_result);  // mandatory Signed int
                            sum += (2 * count - 32) * mean_val;
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}
// C[M x N] += ALPHA * A * B^T, where A is M x K (row stride lda) and B
// is N x K (row stride ldb); C has row stride ldc.  Both operands are
// walked row-wise, the cache-friendly layout for the TB case.
void gemm_nt(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int row, col, kk;
    for (row = 0; row < M; ++row) {
        for (col = 0; col < N; ++col) {
            float acc = 0;
            for (kk = 0; kk < K; ++kk) {
                acc += ALPHA*A[row*lda + kk]*B[col*ldb + kk];
            }
            C[row*ldc + col] += acc;
        }
    }
}
// C[M x N] += ALPHA * A^T * B, where A is K x M (row stride lda) and B
// is K x N (row stride ldb).  The A element is hoisted out of the inner
// loop so the innermost traversal streams through rows of B and C.
void gemm_tn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int row, col, kk;
    for (row = 0; row < M; ++row) {
        for (kk = 0; kk < K; ++kk) {
            const float a_val = ALPHA * A[kk*lda + row];
            for (col = 0; col < N; ++col) {
                C[row*ldc + col] += a_val*B[kk*ldb + col];
            }
        }
    }
}
// C[M x N] += ALPHA * A^T * B^T, where A is K x M (row stride lda) and
// B is N x K (row stride ldb); C has row stride ldc.
void gemm_tt(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int row, col, kk;
    for (row = 0; row < M; ++row) {
        for (col = 0; col < N; ++col) {
            float acc = 0;
            for (kk = 0; kk < K; ++kk) {
                acc += ALPHA*A[row + kk*lda]*B[kk + col*ldb];
            }
            C[row*ldc + col] += acc;
        }
    }
}
// General CPU GEMM dispatcher: C = ALPHA*op(A)*op(B) + BETA*C, with
// op() controlled by the TA/TB transpose flags.  The fast FMA/AVX2
// kernel handles the plain NN case when the CPU supports it; otherwise
// rows of C are computed in parallel, each via a single-row (M == 1)
// call to the matching scalar kernel.
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float BETA,
    float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    // Pre-scale C by BETA (skipped in the common BETA == 1 case).
    if (BETA != 1) {
        int r, s;
        for (r = 0; r < M; ++r) {
            for (s = 0; s < N; ++s) {
                C[r*ldc + s] *= BETA;
            }
        }
    }

    is_avx();   // initialize static CPU-feature-detection variable
    if (is_fma_avx2() && !TA && !TB) {
        gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        return;
    }

    int t;
    #pragma omp parallel for
    for (t = 0; t < M; ++t) {
        // One row of C per iteration.  A transposed A is addressed by
        // column, hence A + t rather than A + t*lda.
        if (!TA && !TB)
            gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
        else if (TA && !TB)
            gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
        else if (!TA && TB)
            gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
        else
            gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
    }
}
#ifdef GPU
#include <math.h>
// GEMM on device-resident buffers via cuBLAS.  cuBLAS is column-major,
// so the call swaps the operands and the M/N dimensions (computes
// op(B)*op(A)) to yield a row-major C = ALPHA*op(A)*op(B) + BETA*C.
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A_gpu, int lda,
    float *B_gpu, int ldb,
    float BETA,
    float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    // NOTE(review): cublasSetStream/cublasSgemm return cublasStatus_t;
    // the casts to cudaError_t feed CHECK_CUDA -- confirm the macro
    // tolerates the mixed status enums.
    cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream());
    CHECK_CUDA(stream_status);
    cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
        (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    CHECK_CUDA(status);
}
// Convenience wrapper around gemm_ongpu for host-resident matrices:
// uploads A, B and C, runs the device GEMM, downloads the result back
// into C, and frees the device buffers.  Three uploads plus one
// download per call -- intended for tests/benchmarks, not hot paths.
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float BETA,
    float *C, int ldc)
{
    // Buffer sizes depend on the transpose flags (leading dimension times
    // the slow dimension of each operand).
    float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M));
    float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
    float *C_gpu = cuda_make_array(C, ldc*M);

    gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);

    cuda_pull_array(C_gpu, C, ldc*M);
    cuda_free(A_gpu);
    cuda_free(B_gpu);
    cuda_free(C_gpu);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// Benchmark gemm_gpu on random matrices (m x k times k x n, 32
// iterations) and print the elapsed CPU time.  Because each gemm_gpu
// call uploads/downloads its operands, this measures transfer + compute.
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
// Benchmark the pure device GEMM (gemm_ongpu) and report GFLOPS.
// Operands are uploaded once and reused across all iterations, so this
// times compute only (plus the per-iteration cudaDeviceSynchronize).
// NOTE(review): a and b are always allocated as m x k and k x n even
// when TA/TB are set -- the buffers are large enough only in that case;
// confirm callers use TA/TB consistently with these shapes.
void time_ongpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaDeviceSynchronize();
    }
    // 2 flops (mul + add) per inner-product element, per output element
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}
// Compare GPU and CPU GEMM on identical random inputs and print the mean
// squared difference.  srand(0) makes the random matrices reproducible.
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    // Zero both accumulators so the BETA=1 accumulation starts equal.
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);

    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}
// Run a fixed set of GPU GEMM benchmarks with layer-sized matrices.
// The accuracy checks and extra timings are kept commented out for
// manual re-enabling.  Always returns 0.
int test_gpu_blas()
{
    /*
    test_gpu_accuracy(0,0,10,576,75);

    test_gpu_accuracy(0,0,17,10,10);
    test_gpu_accuracy(1,0,17,10,10);
    test_gpu_accuracy(0,1,17,10,10);
    test_gpu_accuracy(1,1,17,10,10);

    test_gpu_accuracy(0,0,1000,10,100);
    test_gpu_accuracy(1,0,1000,10,100);
    test_gpu_accuracy(0,1,1000,10,100);
    test_gpu_accuracy(1,1,1000,10,100);

    test_gpu_accuracy(0,0,10,10,10);

    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,192,729,1600);
    time_ongpu(0,0,384,196,1728);
    time_ongpu(0,0,256,196,3456);
    time_ongpu(0,0,256,196,2304);
    time_ongpu(0,0,128,4096,12544);
    time_ongpu(0,0,128,4096,4096);
    */
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,576,12544);
    time_ongpu(0,0,256,2304,784);
    time_ongpu(1,1,2304,256,784);
    time_ongpu(0,0,512,4608,196);
    time_ongpu(1,1,4608,512,196);

    return 0;
}
#endif
// Warm up the cached CPU feature-detection flags (AVX, FMA+AVX2) once
// up front so later gemm calls take the already-initialized fast path.
void init_cpu() {
    is_avx();
    is_fma_avx2();
} |
mgl.h | /***************************************************************************
* mgl.h is part of Math Graphic Library
* Copyright (C) 2007-2016 Alexey Balakin <mathgl.abalakin@gmail.ru> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#ifndef _MGL_H_
#define _MGL_H_
#include "mgl2/mgl_cf.h"
#ifdef __cplusplus
#include "mgl2/data.h"
#include "mgl2/datac.h"
#include <sys/stat.h>
//-----------------------------------------------------------------------------
/// Wrapper class for all graphics
class MGL_EXPORT mglGraph
{
mglGraph(const mglGraph &) {} // copying is not allowed
const mglGraph &operator=(const mglGraph &t) { return t; }
protected:
HMGL gr;
public:
mglGraph(int kind=0, int width=600, int height=400)
{
if(kind==-1) gr=NULL;
#if MGL_HAVE_OPENGL
else if(kind==1) gr=mgl_create_graph_gl();
#else
else if(kind==1)
{ gr=mgl_create_graph(width, height);
SetGlobalWarn("OpenGL support was disabled. Please, enable it and rebuild MathGL."); }
#endif
else gr=mgl_create_graph(width, height);
}
mglGraph(HMGL graph)
{ gr = graph; mgl_use_graph(gr,1); }
virtual ~mglGraph()
{ if(mgl_use_graph(gr,-1)<1) mgl_delete_graph(gr); }
/// Get pointer to internal HMGL object
inline HMGL Self() { return gr; }
/// Set default parameters for plotting
inline void DefaultPlotParam() { mgl_set_def_param(gr); }
/// Set name of plot for saving filename
inline void SetPlotId(const char *id) { mgl_set_plotid(gr,id); }
/// Get name of plot for saving filename
inline const char *GetPlotId() { return mgl_get_plotid(gr); }
/// Ask to stop drawing
inline void Stop(bool stop=true) { mgl_ask_stop(gr, stop); }
/// Check if plot termination is asked
inline bool NeedStop() { return mgl_need_stop(gr); }
/// Set callback function for event processing
inline void SetEventFunc(void (*func)(void *), void *par=NULL)
{ mgl_set_event_func(gr, func, par); }
/// Set the transparency on/off.
inline void Alpha(bool enable) { mgl_set_alpha(gr, enable); }
/// Set the gray-scale mode on/off.
inline void Gray(bool enable) { mgl_set_gray(gr, enable); }
/// Set default value of alpha-channel
inline void SetAlphaDef(double alpha) { mgl_set_alpha_default(gr, alpha); }
/// Set the transparency type (0 - usual, 1 - glass, 2 - lamp)
inline void SetTranspType(int type) { mgl_set_transp_type(gr, type); }
/// Set the size of semi-transparent area around lines, marks, glyphs, ... Default is 1.
inline void SetPenDelta(double d) { mgl_pen_delta(gr,d); }
/// Set the using of light on/off.
inline void Light(bool enable) { mgl_set_light(gr, enable); }
/// Switch on/off the specified light source.
inline void Light(int n,bool enable) { mgl_set_light_n(gr, n, enable); }
/// Use diffusive light (only for local light sources) -- OBSOLETE
inline void SetDifLight(bool dif) { mgl_set_light_dif(gr, dif); }
/// Set to attach light settings to inplot.
inline void AttachLight(bool enable) { mgl_set_attach_light(gr, enable); }
/// Add a light source.
inline void AddLight(int n, mglPoint p, char col='w', double bright=0.5, double ap=0)
{ mgl_add_light_ext(gr, n, p.x, p.y, p.z, col, bright, ap); }
inline void AddLight(int n, mglPoint r, mglPoint p, char col='w', double bright=0.5, double ap=0)
{ mgl_add_light_loc(gr, n, r.x, r.y, r.z, p.x, p.y, p.z, col, bright, ap); }
/// Set ambient light brightness
inline void SetAmbient(double i) { mgl_set_ambbr(gr, i); }
/// Set diffusive light brightness
inline void SetDiffuse(double i) { mgl_set_difbr(gr, i); }
/// Set the fog distance or switch it off (if d=0).
inline void Fog(double d, double dz=0.25) { mgl_set_fog(gr, d, dz); }
/// Set relative width of rectangles in Bars, Barh, BoxPlot, Candle, OHLC (default is 0.7)
inline void SetBarWidth(double width) { mgl_set_bar_width(gr, width); }
/// Set default size of marks (locally you can use "size" option)
inline void SetMarkSize(double size) { mgl_set_mark_size(gr, size); }
/// Set default size of arrows (locally you can use "size" option)
inline void SetArrowSize(double size) { mgl_set_arrow_size(gr, size); }
/// Set number of mesh lines (use 0 to draw all of them)
inline void SetMeshNum(int num) { mgl_set_meshnum(gr, num); }
/// Set number of visible faces (use 0 to draw all of them)
inline void SetFaceNum(int num) { mgl_set_facenum(gr, num); }
/// Set cutting for points outside of bounding box
inline void SetCut(bool cut) { mgl_set_cut(gr, cut); }
/// Set additional cutting box
inline void SetCutBox(mglPoint p1, mglPoint p2)
{ mgl_set_cut_box(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z); }
/// Set the cutting off condition (formula)
inline void CutOff(const char *EqC) { mgl_set_cutoff(gr, EqC); }
/// Set default font size
inline void SetFontSize(double size) { mgl_set_font_size(gr, size);}
/// Set default font style and color
inline void SetFontDef(const char *fnt) { mgl_set_font_def(gr, fnt); }
/// Set FontSize by size in pt and picture DPI (default is 16 pt for dpi=72)
virtual void SetFontSizePT(double pt, int dpi=72) { SetFontSize(pt*27.f/dpi); }
/// Set FontSize by size in centimeters and picture DPI (default is 0.56 cm = 16 pt)
inline void SetFontSizeCM(double cm, int dpi=72) { SetFontSizePT(cm*28.45f,dpi); }
/// Set FontSize by size in inch and picture DPI (default is 0.22 in = 16 pt)
inline void SetFontSizeIN(double in, int dpi=72) { SetFontSizePT(in*72.27f,dpi); }
/// Load font from file
inline void LoadFont(const char *name, const char *path=NULL)
{ mgl_load_font(gr, name, path); }
/// Copy font from another mglGraph instance
inline void CopyFont(const mglGraph *GR) { mgl_copy_font(gr, GR->gr);}
/// Restore font (load default font for new HMGL objects)
inline void RestoreFont() { mgl_restore_font(gr); }
/// Set to use or not text rotation
inline void SetRotatedText(bool rotated) { mgl_set_rotated_text(gr, rotated); }
/// Set default font for all new HMGL and mglGraph objects
static inline void SetDefFont(const char *name, const char *path=NULL) { mgl_def_font(name,path); }
/// Set default palette
inline void SetPalette(const char *colors) { mgl_set_palette(gr, colors); }
/// Set default color scheme
inline void SetDefScheme(const char *sch) { mgl_set_def_sch(gr, sch); }
/// Sets RGB values for color with given id
static inline void SetColor(char id, double r, double g, double b) { mgl_set_color(id, r, g, b); }
/// Set mask for face coloring as array of type 'unsigned char[8]'
static inline void SetMask(char id, const char *mask) { mgl_set_mask(id, mask); }
/// Set mask for face coloring as uint64_t number
static inline void SetMask(char id, uint64_t mask) { mgl_set_mask_val(id, mask); }
/// Set default mask rotation angle
inline void SetMaskAngle(int angle) { mgl_set_mask_angle(gr, angle); }
/// Get last warning code
inline int GetWarn() { return mgl_get_warn(gr);}
/// Set warning code ant fill message
inline void SetWarn(int code, const char *info) { mgl_set_warn(gr,code,info); }
/// Get text of warning message(s)
inline const char *Message() { return mgl_get_mess(gr); }
/// Set global warning message
static inline void SetGlobalWarn(const char *text) { mgl_set_global_warn(text); }
/// Get text of global warning message(s)
static inline const char *GlobalWarn() { return mgl_get_global_warn(); }
/// Suppress printing warnings to stderr
static inline void SuppressWarn(bool on) { mgl_suppress_warn(on); }
/// Check if MathGL version is valid (return false) or not (return true)
static inline bool CheckVersion(const char *ver) { return mgl_check_version(ver); }
/// Set axis range scaling -- simplified way to shift/zoom axis range -- need to replot whole image!
inline void ZoomAxis(mglPoint p1=mglPoint(0,0,0,0), mglPoint p2=mglPoint(1,1,1,1))
{ mgl_zoom_axis(gr, p1.x,p1.y,p1.z,p1.c, p2.x,p2.y,p2.z,p2.c); }
/// Add [v1, v2] to the current range in direction dir
inline void AddRange(char dir, double v1, double v2)
{ mgl_add_range_val(gr, dir, v1, v2); }
/// Set range in direction dir as [v1, v2]
inline void SetRange(char dir, double v1, double v2)
{ mgl_set_range_val(gr, dir, v1, v2); }
/// Set range in direction dir as minimal and maximal values of data a
inline void SetRange(char dir, const mglDataA &dat, bool add=false)
{ mgl_set_range_dat(gr, dir, &dat, add); }
/// Set values of axis range as minimal and maximal values of corresponding data
inline void SetRanges(const mglDataA &xx, const mglDataA &yy, const mglDataA &zz, const mglDataA &cc)
{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0);
mgl_set_range_dat(gr,'z',&zz,0); mgl_set_range_dat(gr,'c',&cc,0); }
/// Set values of axis range as minimal and maximal values of corresponding data
inline void SetRanges(const mglDataA &xx, const mglDataA &yy, const mglDataA &zz)
{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0);
mgl_set_range_dat(gr,'z',&zz,0); mgl_set_range_dat(gr,'c',&zz,0); }
/// Set values of axis range as minimal and maximal values of corresponding data
inline void SetRanges(const mglDataA &xx, const mglDataA &yy)
{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0); }
/// Set values of axis ranges
inline void SetRanges(double x1, double x2, double y1, double y2, double z1=0, double z2=0)
{ mgl_set_ranges(gr, x1, x2, y1, y2, z1, z2); }
/// Set values of axis ranges
inline void SetRanges(mglPoint p1, mglPoint p2)
{ mgl_set_ranges(gr, p1.x, p2.x, p1.y, p2.y, p1.z, p2.z); }
/// Set ranges for automatic variables
inline void SetAutoRanges(double x1, double x2, double y1=0, double y2=0, double z1=0, double z2=0, double c1=0, double c2=0)
{ mgl_set_auto_ranges(gr, x1, x2, y1, y2, z1, z2, c1, c2); }
/// Set ranges for automatic variables
inline void SetAutoRanges(mglPoint p1, mglPoint p2)
{ mgl_set_auto_ranges(gr, p1.x, p2.x, p1.y, p2.y, p1.z, p2.z, p1.c, p2.c); }
/// Set axis origin
inline void SetOrigin(mglPoint p)
{ mgl_set_origin(gr, p.x, p.y, p.z); }
inline void SetOrigin(double x0, double y0, double z0=mglNaN)
{ mgl_set_origin(gr, x0, y0, z0); }
/// Set the transformation formulas for coordinate. Use "" or NULL for built-in ones
inline void SetFunc(const char *EqX, const char *EqY, const char *EqZ=NULL, const char *EqA=NULL)
{ mgl_set_func(gr, EqX, EqY, EqZ, EqA); }
/// Set one of predefined transformation rule
inline void SetCoor(int how) { mgl_set_coor(gr, how); }
/// Set to draw Ternary axis (triangle like axis, grid and so on)
/** val=1 for Ternary axis (a+b+c=1, z=z),
* val=2 for Quaternary axis (a+b+c+d=1),
* val|4 for projections. */
inline void Ternary(int val) { mgl_set_ternary(gr, val); }
/// Set to use or not tick labels rotation
inline void SetTickRotate(bool val) { mgl_set_tick_rotate(gr,val); }
/// Set to use or not tick labels skipping
inline void SetTickSkip(bool val) { mgl_set_tick_skip(gr,val); }
/// Set tick length
inline void SetTickLen(double len, double stt=1)
{ mgl_set_tick_len(gr, len, stt); }
/// Set axis and ticks style
inline void SetAxisStl(const char *stl="k", const char *tck=0, const char *sub=0)
{ mgl_set_axis_stl(gr, stl, tck, sub); }
/// Set time templates for ticks
inline void SetTicksTime(char dir, double d=0, const char *t="")
{ mgl_set_ticks_time(gr,dir,d,t); }
/// Set ticks text (\n separated). Use "" to disable this feature.
inline void SetTicksVal(char dir, const char *lbl, bool add=false)
{ mgl_set_ticks_str(gr,dir,lbl,add); }
inline void SetTicksVal(char dir, const wchar_t *lbl, bool add=false)
{ mgl_set_ticks_wcs(gr,dir,lbl,add); }
/// Set ticks position and text (\n separated). Use "" to disable this feature.
inline void SetTicksVal(char dir, const mglDataA &v, const char *lbl, bool add=false)
{ mgl_set_ticks_val(gr,dir,&v,lbl,add); }
inline void SetTicksVal(char dir, const mglDataA &v, const wchar_t *lbl, bool add=false)
{ mgl_set_ticks_valw(gr,dir,&v,lbl,add); }
/// Add manual tick at given position. Use "" to disable this feature.
inline void AddTick(char dir, double val, const char *lbl)
{ mgl_add_tick(gr,dir,val,lbl); }
inline void AddTick(char dir, double val, const wchar_t *lbl)
{ mgl_add_tickw(gr,dir,val,lbl); }
/// Set the ticks parameters and string for its factor
inline void SetTicks(char dir, double d=0, int ns=0, double org=mglNaN, const char *factor="")
{ mgl_set_ticks_fact(gr, dir, d, ns, org, factor); }
inline void SetTicks(char dir, double d, int ns, double org, const wchar_t *factor)
{ mgl_set_ticks_factw(gr, dir, d, ns, org, factor); }
/// Auto adjust ticks
inline void Adjust(const char *dir="xyzc")
{ mgl_adjust_ticks(gr, dir); }
/// Set templates for ticks
inline void SetTickTempl(char dir, const char *t)
{ mgl_set_tick_templ(gr,dir,t); }
inline void SetTickTempl(char dir, const wchar_t *t)
{ mgl_set_tick_templw(gr,dir,t); }
/// Tune ticks (tune|1 for common multiplier, tune|2 for common component)
inline void SetTuneTicks(int tune, double fact_pos=1.15)
{ mgl_tune_ticks(gr, tune, fact_pos); }
/// Set additional shift of tick labels
inline void SetTickShift(mglPoint p)
{ mgl_set_tick_shift(gr,p.x,p.y,p.z,p.c); }
/// Set to use UTC time instead of local time
inline void SetTimeUTC(bool enable)
{ mgl_set_flag(gr,enable, MGL_USE_GMTIME); }
/// Set to draw tick labels at axis origin
inline void SetOriginTick(bool enable=true)
{ mgl_set_flag(gr,!enable, MGL_NO_ORIGIN); }
/// Put further plotting in m-th cell of nx*ny grid of the image.
/** String \a style may contain:
* '<' for reserving space at left
* '>' for reserving space at right
* '^' for reserving space at top
* '_' for reserving space at bottom
* '#' for using whole region. */
inline void SubPlot(int nx,int ny,int m,const char *style="<>_^", double dx=0, double dy=0)
{ mgl_subplot_d(gr, nx, ny, m, style, dx, dy); }
/// Put further plotting in rectangle of dx*dy cells starting from m-th cell of nx*ny grid of the image.
/** String \a style may contain:
* '<' for reserving space at left
* '>' for reserving space at right
* '^' for reserving space at top
* '_' for reserving space at bottom
* '#' for using whole region. */
inline void MultiPlot(int nx,int ny,int m, int dx, int dy, const char *style="<>_^")
{ mgl_multiplot(gr, nx, ny, m, dx, dy, style); }
/// Put further plotting in a region [x1,x2]*[y1,y2] of the image or subplot (x1,x2,y1,y2 in range [0, 1]).
inline void InPlot(double x1,double x2,double y1,double y2, bool rel=true)
{ if(rel) mgl_relplot(gr, x1, x2, y1, y2);
else mgl_inplot(gr, x1, x2, y1, y2); }
/// Put further plotting in column cell of previous subplot
inline void ColumnPlot(int num, int ind, double d=0)
{ mgl_columnplot(gr,num,ind,d); }
/// Put further plotting in matrix cell of previous subplot
inline void GridPlot(int nx, int ny, int ind, double d=0)
{ mgl_gridplot(gr,nx,ny,ind,d); }
/// Put further plotting in cell of stick rotated on angles tet, phi
inline void StickPlot(int num, int i, double tet, double phi)
{ mgl_stickplot(gr,num,i,tet,phi); }
/// Put further plotting in cell of stick sheared on sx, sy.
inline void ShearPlot(int num, int i, mreal sx, mreal sy, mreal xd=1, mreal yd=0)
{ mgl_shearplot(gr,num,i,sx,sy,xd,yd); }
/// Set factor of plot size
inline void SetPlotFactor(double val)
{ mgl_set_plotfactor(gr,val); }
/// Push transformation matrix into stack
inline void Push() { mgl_mat_push(gr); }
/// Pop transformation matrix from stack
inline void Pop() { mgl_mat_pop(gr); }
/// Add title for current subplot/inplot
/** Style '#' draw box around the title. */
inline void Title(const char *title,const char *stl="",double size=-2)
{ mgl_title(gr,title,stl,size); }
/// Add title for current subplot/inplot
/** Style '#' draw box around the title. */
inline void Title(const wchar_t *title,const char *stl="",double size=-2)
{ mgl_titlew(gr,title,stl,size); }
/// Set aspect ratio for further plotting.
inline void Aspect(double Ax,double Ay,double Az=1)
{ mgl_aspect(gr, Ax, Ay, Az); }
/// Shear a further plotting.
inline void Shear(double Sx,double Sy)
{ mgl_shear(gr, Sx, Sy); }
/// Rotate a further plotting.
inline void Rotate(double TetX,double TetZ=0,double TetY=0)
{ mgl_rotate(gr, TetX, TetZ, TetY); }
/// Rotate a further plotting around vector {x,y,z}.
inline void RotateN(double Tet,double x,double y,double z)
{ mgl_rotate_vector(gr, Tet, x, y, z); }
/// Set perspective (in range [0,1)) for plot. Set to zero for switching off.
inline void Perspective(double val)
{ mgl_perspective(gr, val); }
/// Set angle of view independently from Rotate().
inline void View(double TetX,double TetZ=0,double TetY=0)
{ mgl_view(gr, TetX, TetZ, TetY); }
/// Set angle of view independently from Rotate().
inline void ViewAsRotate(double TetZ,double TetX,double TetY=0)
{ mgl_view(gr, -TetX, -TetZ, -TetY); }
/// Zoom in/out a part of picture (use Zoom(0, 0, 1, 1) for restore default)
inline void Zoom(double x1, double y1, double x2, double y2)
{ mgl_zoom(gr, x1, y1, x2, y2); }
/// Set size of frame in pixels. Normally this function is called internally.
/** With clf=true the canvas is recreated at the new size; with clf=false the
 * existing picture is rescaled to the new size instead. */
inline void SetSize(int width, int height, bool clf=true)
{
	if(!clf)	{	mgl_scale_size(gr, width, height);	return;	}
	mgl_set_size(gr, width, height);
}
/// Scaling for all further set size calls.
static inline void SetSizeScl(double scl) { mgl_set_size_scl(scl); }
/// Set plot quality
/** qual=0 -- no face drawing (fastest),
* qual=1 -- no color interpolation (fast),
* qual=2 -- high quality (normal),
* qual|4 -- direct bitmap drawing (low memory usage);
* qual|8 for dots drawing instead of primitives (extremely fast). */
inline void SetQuality(int qual=MGL_DRAW_NORM) { mgl_set_quality(gr, qual); }
/// Get plot quality
inline int GetQuality() { return mgl_get_quality(gr); }
/// Set drawing region for Quality&4
inline void SetDrawReg(long nx=1, long ny=1, long m=0) { mgl_set_draw_reg(gr,nx,ny,m); }
/// Start group of objects
inline void StartGroup(const char *name) { mgl_start_group(gr, name); }
/// End group of objects
inline void EndGroup() { mgl_end_group(gr); }
/// Highlight objects with given id
inline void Highlight(int id) { mgl_highlight(gr, id); }
/// Show current image
inline void ShowImage(const char *viewer, bool keep=0)
{ mgl_show_image(gr, viewer, keep); }
/// Write the frame in file (depending extension, write current frame if fname is empty)
inline void WriteFrame(const char *fname=0,const char *descr="")
{ mgl_write_frame(gr, fname, descr); }
/// Write the frame in file using JPEG format
inline void WriteJPEG(const char *fname,const char *descr="")
{ mgl_write_jpg(gr, fname, descr); }
/// Write the frame in file using PNG format with transparency
inline void WritePNG(const char *fname,const char *descr="", bool alpha=true)
{ if(alpha) mgl_write_png(gr, fname, descr);
else mgl_write_png_solid(gr, fname, descr); }
/// Write the frame in file using BMP format
inline void WriteBMP(const char *fname,const char *descr="")
{ mgl_write_bmp(gr, fname, descr); }
/// Write the frame in file using TGA format
inline void WriteTGA(const char *fname,const char *descr="")
{	mgl_write_tga(gr, fname, descr);	}
/// Write the frame in file using PostScript format
inline void WriteEPS(const char *fname,const char *descr="")
{ mgl_write_eps(gr, fname, descr); }
/// Write the frame in file using LaTeX format
inline void WriteTEX(const char *fname,const char *descr="")
{ mgl_write_tex(gr, fname, descr); }
/// Write the frame in file using PostScript format as bitmap
inline void WriteBPS(const char *fname,const char *descr="")
{ mgl_write_bps(gr, fname, descr); }
/// Write the frame in file using SVG format
inline void WriteSVG(const char *fname,const char *descr="")
{ mgl_write_svg(gr, fname, descr); }
/// Write the frame in file using GIF format (only for current frame!)
inline void WriteGIF(const char *fname,const char *descr="")
{ mgl_write_gif(gr, fname, descr); }
/// Write the frame in file using OBJ format
inline void WriteOBJ(const char *fname,const char *descr="",bool use_png=true)
{ mgl_write_obj(gr, fname, descr, use_png); }
/// Write the frame in file using OBJ format - Balakin way
inline void WriteOBJold(const char *fname,const char *descr="",bool use_png=true)
{ mgl_write_obj_old(gr, fname, descr, use_png); }
/// Write the frame in file using XYZ format
inline void WriteXYZ(const char *fname,const char *descr="")
{ mgl_write_xyz(gr, fname, descr); }
/// Write the frame in file using STL format (faces only)
inline void WriteSTL(const char *fname,const char *descr="")
{ mgl_write_stl(gr, fname, descr); }
/// Write the frame in file using OFF format
inline void WriteOFF(const char *fname,const char *descr="", bool colored=false)
{ mgl_write_off(gr, fname, descr,colored); }
// /// Write the frame in file using X3D format
// inline void WriteX3D(const char *fname,const char *descr="")
// { mgl_write_x3d(gr, fname, descr); }
/// Write the frame in file using PRC format
inline void WritePRC(const char *fname,const char *descr="",bool make_pdf=true)
{ mgl_write_prc(gr, fname, descr, make_pdf); }
/// Export in JSON format suitable for later drawing by JavaScript
inline void WriteJSON(const char *fname,const char *descr="",bool force_z=false)
{ if(force_z) mgl_write_json_z(gr, fname, descr);
else mgl_write_json(gr, fname, descr); }
/// Return string of JSON data suitable for later drawing by JavaScript
inline const char *GetJSON() { return mgl_get_json(gr); }
/// Force preparing the image. It can be useful for OpenGL mode mostly.
inline void Finish() { mgl_finish(gr); }
/// Create new frame.
inline void NewFrame() { mgl_new_frame(gr); }
/// Finish frame drawing
inline void EndFrame() { mgl_end_frame(gr); }
/// Get the number of created frames
inline int GetNumFrame() { return mgl_get_num_frame(gr); }
/// Reset frames counter (start it from zero)
inline void ResetFrames() { mgl_reset_frames(gr); }
/// Delete primitives for i-th frame (work if MGL_VECT_FRAME is set on)
inline void DelFrame(int i) { mgl_del_frame(gr, i); }
/// Get drawing data for i-th frame (work if MGL_VECT_FRAME is set on)
inline void GetFrame(int i) { mgl_get_frame(gr, i); }
/// Set drawing data for i-th frame (work if MGL_VECT_FRAME is set on). Work as EndFrame() but don't add frame to GIF image.
inline void SetFrame(int i) { mgl_set_frame(gr, i); }
/// Append drawing data from i-th frame (work if MGL_VECT_FRAME is set on)
inline void ShowFrame(int i){ mgl_show_frame(gr, i); }
/// Clear list of primitives for current drawing
inline void ClearFrame() { mgl_clear_frame(gr); }
/// Start write frames to cinema using GIF format
inline void StartGIF(const char *fname, int ms=100)
{ mgl_start_gif(gr, fname,ms); }
/// Stop writing cinema using GIF format
inline void CloseGIF() { mgl_close_gif(gr); }
/// Export points and primitives in file using MGLD format
inline void ExportMGLD(const char *fname, const char *descr=0)
{ mgl_export_mgld(gr, fname, descr); }
/// Import points and primitives from file using MGLD format
inline void ImportMGLD(const char *fname, bool add=false)
{ mgl_import_mgld(gr, fname, add); }
/// Copy RGB values into array which is allocated by user
/** Position of element {i,j} is [3*i + 3*Width*j]. */
inline bool GetRGB(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=3*w*h) memcpy(imgdata, mgl_get_rgb(gr),3*w*h);
return imglen>=3*w*h;
}
/// Get RGB values of current bitmap
/** Position of element {i,j} is [3*i + 3*Width*j]. */
inline const unsigned char *GetRGB() { return mgl_get_rgb(gr); }
/// Copy RGBA values into array which is allocated by user
/** Position of element {i,j} is [4*i + 4*Width*j]. */
inline bool GetRGBA(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=4*w*h) memcpy(imgdata, mgl_get_rgba(gr),4*w*h);
return imglen>=4*w*h;
}
/// Get RGBA values of current bitmap
/** Position of element {i,j} is [4*i + 4*Width*j]. */
inline const unsigned char *GetRGBA() { return mgl_get_rgba(gr); }
/// Copy BGRN values into array which is allocated by user
inline bool GetBGRN(unsigned char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr), i;
const unsigned char *buf=mgl_get_rgb(gr);
if(imglen>=4*w*h) for(i=0;i<w*h;i++)
{
imgdata[4*i] = buf[3*i+2];
imgdata[4*i+1] = buf[3*i+1];
imgdata[4*i+2] = buf[3*i];
imgdata[4*i+3] = 255;
}
return imglen>=4*w*h;
}
/// Copy RGBA values of background image into array which is allocated by user
/** Position of element {i,j} is [4*i + 4*Width*j]. */
inline bool GetBackground(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=4*w*h) memcpy(imgdata, mgl_get_background(gr),4*w*h);
return imglen>=4*w*h;
}
/// Get RGBA values of background image
/** Position of element {i,j} is [4*i + 4*Width*j]. */
inline const unsigned char *GetBackground() { return mgl_get_background(gr); }
/// Get width of the image
inline int GetWidth() { return mgl_get_width(gr); }
/// Get height of the image
inline int GetHeight() { return mgl_get_height(gr);}
/// Calculate 3D coordinate {x,y,z} for screen point {xs,ys}
inline mglPoint CalcXYZ(int xs, int ys)
{
	mreal px=0, py=0, pz=0;
	mgl_calc_xyz(gr, xs, ys, &px, &py, &pz);
	return mglPoint(px, py, pz);
}
/// Calculate screen point {xs,ys} for 3D coordinate {x,y,z}
inline mglPoint CalcScr(mglPoint p)
{
	int sx=0, sy=0;
	mgl_calc_scr(gr, p.x, p.y, p.z, &sx, &sy);
	return mglPoint(sx, sy);
}
/// Set object/subplot id
inline void SetObjId(int id) { mgl_set_obj_id(gr,id); }
/// Get object id
inline int GetObjId(long x,long y) { return mgl_get_obj_id(gr,x,y); }
/// Get subplot id
inline int GetSplId(long x,long y) { return mgl_get_spl_id(gr,x,y); }
/// Check if {\a xs,\a ys} is close to active point with accuracy d, and return its position or -1
inline long IsActive(int xs, int ys, int d=1) { return mgl_is_active(gr,xs,ys,d); }
/// Combine plots from 2 canvases. Result will be saved into this
inline void Combine(const mglGraph *g) { mgl_combine_gr(gr,g->gr); }
/// Clear up the frame and fill background by specified color
inline void Clf(double r, double g, double b) { mgl_clf_rgb(gr, r, g, b); }
/// Clear up the frame and fill background by specified color with manual transparency
inline void Clf(const char *col) { mgl_clf_str(gr, col); }
/// Clear up the frame and fill background by specified color
inline void Clf(char col) { mgl_clf_chr(gr, col); }
/// Clear up the frame
inline void Clf() { mgl_clf(gr); }
/// Clear unused points and primitives. Useful only in combination with SetFaceNum().
inline void ClearUnused() { mgl_clear_unused(gr); }
/// Load background image
inline void LoadBackground(const char *fname, double alpha=1)
{ mgl_load_background(gr,fname,alpha); }
/// Force drawing the image and use it as background one
inline void Rasterize() { mgl_rasterize(gr); }
/// Draws the point (ball) at position {x,y,z} with color c
inline void Ball(mglPoint p, char c='r')
{
	// Build the 2-character mark specification ".<color>" and draw it.
	char mark[3];
	mark[0] = '.';	mark[1] = c;	mark[2] = '\0';
	mgl_mark(gr, p.x, p.y, p.z, mark);
}
/// Draws the mark at position p
inline void Mark(mglPoint p, const char *mark)
{ mgl_mark(gr, p.x, p.y, p.z, mark); }
/// Draws the line between points by specified pen
/** Large \a n (for example, n=100) should be used for geodesic line in curved coordinates */
inline void Line(mglPoint p1, mglPoint p2, const char *pen="B",int n=2)
{ mgl_line(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, pen, n); }
/// Draws the spline curve between points by specified pen
inline void Curve(mglPoint p1, mglPoint d1, mglPoint p2, mglPoint d2, const char *pen="B", int n=100)
{ mgl_curve(gr, p1.x, p1.y, p1.z, d1.x, d1.y, d1.z, p2.x, p2.y, p2.z, d2.x, d2.y, d2.z, pen, n); }
/// Draws the 3d error box e for point p
inline void Error(mglPoint p, mglPoint e, const char *pen="k")
{ mgl_error_box(gr, p.x, p.y, p.z, e.x, e.y, e.z, pen); }
/// Draws Lamerey diagram for mapping x_new = f(x_old)
/** String \a stl may contain: ‘v’ for drawing arrows; ‘~’ for disable 1st segment.
* Option value set the number of segments (default is 20).*/
inline void Lamerey(double x0, const mglDataA &f, const char *stl="", const char *opt="")
{ mgl_lamerey_dat(gr,x0,&f,stl,opt); }
inline void Lamerey(double x0, const char *func, const char *stl="", const char *opt="")
{ mgl_lamerey_str(gr,x0,func,stl,opt); }
/// Draws Bifurcation diagram for mapping x_new = f(x_old) in x-axis range
/** Option value set the number of stationary points (default is 1024).*/
inline void Bifurcation(double dx, const mglDataA &f, const char *stl="", const char *opt="")
{ mgl_bifurcation_dat(gr,dx,&f,stl,opt); }
inline void Bifurcation(double dx, const char *func, const char *stl="", const char *opt="")
{ mgl_bifurcation_str(gr,dx,func,stl,opt); }
/// Draws the face between points with color stl (include interpolation up to 4 colors).
inline void Face(mglPoint p1, mglPoint p2, mglPoint p3, mglPoint p4, const char *stl="r")
{ mgl_face(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, p3.x, p3.y, p3.z, p4.x, p4.y, p4.z, stl); }
/// Draws the face in y-z plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceX(mglPoint p, double wy, double wz, const char *stl="w", double dx=0, double dy=0)
{ mgl_facex(gr, p.x, p.y, p.z, wy, wz, stl, dx, dy); }
/// Draws the face in x-z plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceY(mglPoint p, double wx, double wz, const char *stl="w", double dx=0, double dy=0)
{ mgl_facey(gr, p.x, p.y, p.z, wx, wz, stl, dx, dy); }
/// Draws the face in x-y plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceZ(mglPoint p, double wx, double wy, const char *stl="w", double dx=0, double dy=0)
{ mgl_facez(gr, p.x, p.y, p.z, wx, wy, stl, dx, dy); }
/// Draws the drop at point p in direction d with color col and radius r
/** Parameter \a shift set the degree of drop oblongness: ‘0’ is sphere, ‘1’ is maximally oblongness drop. Parameter \a ap set relative width of the drop (this is analogue of “ellipticity” for the sphere).*/
inline void Drop(mglPoint p, mglPoint d, double r, const char *col="r", double shift=1, double ap=1)
{ mgl_drop(gr, p.x, p.y, p.z, d.x, d.y, d.z, r, col, shift, ap); }
/// Draws the sphere at point p with color col and radius r
inline void Sphere(mglPoint p, double r, const char *col="r")
{ mgl_sphere(gr, p.x, p.y, p.z, r, col); }
/// Draws the cone between points p1,p2 with radius r1,r2 and with style stl
/** Parameter \a stl can contain:
* ‘@’ for drawing edges;
* ‘#’ for wired cones;
* ‘t’ for drawing tubes/cylinder instead of cones/prisms;
* ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones.*/
inline void Cone(mglPoint p1, mglPoint p2, double r1, double r2=-1, const char *stl="r@")
{ mgl_cone(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z,r1,r2,stl); }
/// Draws the ellipse between points p1,p2 with color stl and width r
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Ellipse(mglPoint p1, mglPoint p2, double r, const char *stl="r")
{ mgl_ellipse(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, r,stl); }
/// Draws the circle at point p with color stl and radius r
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Circle(mglPoint p, double r, const char *stl="r")
{ mgl_ellipse(gr, p.x, p.y, p.z, p.x, p.y, p.z, r,stl); }
/// Draws the rhomb between points p1,p2 with color stl and width r
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Rhomb(mglPoint p1, mglPoint p2, double r, const char *stl="r")
{ mgl_rhomb(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, r,stl); }
/// Draws the polygon based on points p1,p2 with color stl
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Polygon(mglPoint p1, mglPoint p2, int n, const char *stl="r")
{ mgl_polygon(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, n,stl); }
/// Draws the arc around axis pr with center at p0 and starting from p1, by color stl and angle a (in degrees)
inline void Arc(mglPoint p0, mglPoint pr, mglPoint p1, double a, const char *stl="r")
{ mgl_arc_ext(gr, p0.x,p0.y,p0.z, pr.x,pr.y,pr.z, p1.x,p1.y,p1.z, a,stl); }
/// Draws the arc around axis 'z' with center at p0 and starting from p1, by color stl and angle a (in degrees)
inline void Arc(mglPoint p0, mglPoint p1, double a, const char *stl="r")
{	mgl_arc_ext(gr, p0.x,p0.y,p0.z, 0,0,1, p1.x,p1.y,p0.z, a,stl);	}	// NOTE(review): end point deliberately reuses p0.z (not p1.z), presumably to keep the arc in the z-plane of its center — confirm against mgl_arc_ext semantics
/// Draws bitmap (logo) which is stretched along whole axis range
inline void Logo(long w, long h, const unsigned char *rgba, bool smooth=false, const char *opt="")
{ mgl_logo(gr, w, h, rgba, smooth, opt); }
inline void Logo(const char *fname, bool smooth=false, const char *opt="")
{ mgl_logo_file(gr, fname, smooth, opt); }
/// Print text in position p with specified font
inline void Putsw(mglPoint p,const wchar_t *text,const char *font=":C",double size=-1)
{ mgl_putsw(gr, p.x, p.y, p.z, text, font, size); }
/// Print text in position p with specified font
inline void Puts(mglPoint p,const char *text,const char *font=":C",double size=-1)
{ mgl_puts(gr, p.x, p.y, p.z, text, font, size); }
/// Print text in position p with specified font
inline void Putsw(double x, double y,const wchar_t *text,const char *font=":AC",double size=-1)
{ mgl_putsw(gr, x, y, 0, text, font, size); }
/// Print text in position p with specified font
inline void Puts(double x, double y,const char *text,const char *font=":AC",double size=-1)
{ mgl_puts(gr, x, y, 0, text, font, size); }
/// Print text in position p along direction d with specified font
inline void Putsw(mglPoint p, mglPoint d, const wchar_t *text, const char *font=":L", double size=-1)
{ mgl_putsw_dir(gr, p.x, p.y, p.z, d.x, d.y, d.z, text, font, size); }
/// Print text in position p along direction d with specified font
inline void Puts(mglPoint p, mglPoint d, const char *text, const char *font=":L", double size=-1)
{ mgl_puts_dir(gr, p.x, p.y, p.z, d.x, d.y, d.z, text, font, size); }
/// Print text along the curve
inline void Text(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *text, const char *font="", const char *opt="")
{ mgl_text_xyz(gr, &x, &y, &z, text, font, opt); }
/// Print text along the curve
inline void Text(const mglDataA &x, const mglDataA &y, const char *text, const char *font="", const char *opt="")
{ mgl_text_xy(gr, &x, &y, text, font, opt); }
/// Print text along the curve
inline void Text(const mglDataA &y, const char *text, const char *font="", const char *opt="")
{ mgl_text_y(gr, &y, text, font, opt); }
/// Print text along the curve
inline void Text(const mglDataA &x, const mglDataA &y, const mglDataA &z, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_xyz(gr, &x, &y, &z, text, font, opt); }
/// Print text along the curve
inline void Text(const mglDataA &x, const mglDataA &y, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_xy(gr, &x, &y, text, font, opt); }
/// Print text along the curve
inline void Text(const mglDataA &y, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_y(gr, &y, text, font, opt); }
/// Draws bounding box outside the plotting volume with color c.
/** Style ‘@’ produce filled back faces. */
inline void Box(const char *col="", bool ticks=true)
{ mgl_box_str(gr, col, ticks); }
/// Draw axises with ticks in direction(s) dir.
/** Parameter \a dir may contain:
* ‘xyzt’for drawing axis in corresponding direction;
* ‘XYZT’ for drawing axis in corresponding direction but with inverted positions of labels;
* ‘~’, ‘_’ for disabling tick labels;
* ‘U’ for disabling rotation of tick labels;
* ‘^’ for inverting default axis origin;
* ‘!’ for disabling ticks tuning;
* ‘AKDTVISO’ for drawing arrow at the end of axis;
* ‘a’ for forced adjusting of axis ticks;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.
* Option "value" set the manual rotation angle for the ticks. */
inline void Axis(const char *dir="xyzt", const char *stl="", const char *opt="")
{ mgl_axis(gr, dir,stl,opt); }
/// Draw grid lines perpendicular to direction(s) dir.
inline void Grid(const char *dir="xyzt",const char *pen="B", const char *opt="")
{ mgl_axis_grid(gr, dir, pen, opt); }
/// Print the label text for axis dir.
/** Option "value" set additional shifting of the label. */
inline void Label(char dir, const char *text, double pos=+1, const char *opt="")
{ mgl_label(gr, dir, text, pos, opt); }
/// Print the label text for axis dir.
/** Option "value" set additional shifting of the label. */
inline void Label(char dir, const wchar_t *text, double pos=+1, const char *opt="")
{ mgl_labelw(gr, dir, text, pos, opt); }
/// Draw colorbar at edge of axis
/** Parameter \a sch may contain:
* ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
* ‘I’ for positioning near bounding (by default, at edges of subplot);
* ‘A’ for using absolute coordinates;
* ‘~’ for disabling tick labels.
* ‘!’ for disabling ticks tuning;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const char *sch="")
{ mgl_colorbar(gr, sch); }
/// Draw colorbar at manual position
/** Parameter \a sch may contain:
* ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
* ‘I’ for positioning near bounding (by default, at edges of subplot);
* ‘A’ for using absolute coordinates;
* ‘~’ for disabling tick labels.
* ‘!’ for disabling ticks tuning;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const char *sch,double x,double y,double w=1,double h=1)
{ mgl_colorbar_ext(gr, sch, x,y,w,h); }
/// Draw colorbar with manual colors at edge of axis
/** Parameter \a sch may contain:
* ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
* ‘I’ for positioning near bounding (by default, at edges of subplot);
* ‘A’ for using absolute coordinates;
* ‘~’ for disabling tick labels.
* ‘!’ for disabling ticks tuning;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const mglDataA &val, const char *sch="")
{ mgl_colorbar_val(gr, &val, sch); }
/// Draw colorbar with manual colors at manual position
/** Parameter \a sch may contain:
* ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
* ‘I’ for positioning near bounding (by default, at edges of subplot);
* ‘A’ for using absolute coordinates;
* ‘~’ for disabling tick labels.
* ‘!’ for disabling ticks tuning;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const mglDataA &val, const char *sch,double x,double y,double w=1,double h=1)
{ mgl_colorbar_val_ext(gr, &val, sch, x,y,w,h); }
/// Add string to legend
inline void AddLegend(const char *text,const char *style)
{ mgl_add_legend(gr, text, style); }
inline void AddLegend(const wchar_t *text,const char *style)
{ mgl_add_legendw(gr, text, style); }
/// Clear saved legend string
inline void ClearLegend()
{ mgl_clear_legend(gr); }
/// Draw legend of accumulated strings at position {x,y}
/** Parameter fnt may contain:
* font style for legend text;
* colors for background (first one), border (second one) and text (last one);
* ‘A’ for positioning in absolute coordinates;
* ‘^’ for positioning outside of specified point;
* ‘-’ for arranging entries horizontally;
* ‘#’ for drawing box around legend.
* Option value set the space between line samples and text (default is 0.1).*/
inline void Legend(double x, double y, const char *font="#", const char *opt="")
{ mgl_legend_pos(gr, x, y, font, opt); }
/// Draw legend of accumulated strings
/** Parameter fnt may contain:
* font style for legend text;
* colors for background (first one), border (second one) and text (last one);
* ‘A’ for positioning in absolute coordinates;
* ‘^’ for positioning outside of specified point;
* ‘-’ for arranging entries horizontally;
* ‘#’ for drawing box around legend.
* Option value set the space between line samples and text (default is 0.1).
* Parameter \a where sets position: 0 at bottom-left, 1 at bottom-right, 2 at top-left, 3 at top-right (default).*/
inline void Legend(int where=3, const char *font="#", const char *opt="")
{ mgl_legend(gr, where, font, opt); }
/// Set number of marks in legend sample
inline void SetLegendMarks(int num) { mgl_set_legend_marks(gr, num); }
/// Draw usual curve {x,y,z}
inline void Plot(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_plot_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw usual curve {x,y}
inline void Plot(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_plot_xy(gr, &x, &y, pen,opt); }
/// Draw usual curve {x,y} with x in x-axis range
inline void Plot(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_plot(gr, &y, pen,opt); }
/// Draw tapes which rotates as (bi-)normales of curve {x,y,z}
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_tape_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw tapes which rotates as (bi-)normales of curve {x,y}
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_tape_xy(gr, &x, &y, pen,opt); }
/// Draw tapes which rotates as (bi-)normales of curve {x,y} with x in x-axis range
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_tape(gr, &y, pen,opt); }
/// Draw radar chart (plot in curved coordinates)
/** Option "value" set the additional shift of data (i.e. the data a+value is used instead of a).*/
inline void Radar(const mglDataA &a, const char *pen="", const char *opt="")
{ mgl_radar(gr, &a, pen, opt); }
/// Draw stairs for points in arrays {x,y,z}
inline void Step(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_step_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw stairs for points in arrays {x,y}
inline void Step(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_step_xy(gr, &x, &y, pen, opt); }
/// Draw stairs for points in arrays {x,y} with x in x-axis range
inline void Step(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_step(gr, &y, pen, opt); }
/// Draw curve {x,y,z} which is colored by c (like tension plot)
inline void Tens(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens_xyz(gr, &x, &y, &z, &c, pen, opt); }
/// Draw curve {x,y} which is colored by c (like tension plot)
inline void Tens(const mglDataA &x, const mglDataA &y, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens_xy(gr, &x, &y, &c, pen, opt); }
/// Draw curve {x,y} with x in x-axis range which is colored by c (like tension plot)
inline void Tens(const mglDataA &y, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens(gr, &y, &c, pen, opt); }
/// Fill area between curve {x,y,z} and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_area_xyz(gr, &x, &y, &z, pen, opt); }
/// Fill area between curve {x,y} and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_area_xy(gr, &x, &y, pen, opt); }
/// Fill area between curve {x,y} with x in x-axis range and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_area(gr, &y, pen, opt); }
/// Fill area between curves {x,y1} and {x,y2} with x in x-axis range
/** Style 'i' will fill area only if y1 < y2.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region(gr, &y1, &y2, pen, opt); }
/// Fill area between curves {x,y1} and {x,y2}
/** Style 'i' will fill area only if y1 < y2.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region_xy(gr, &x, &y1, &y2, pen, opt); }
/// Fill area (draw ribbon) between curves {x1,y1,z1} and {x2,y2,z2}
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x1, const mglDataA &y1, const mglDataA &z1, const mglDataA &x2, const mglDataA &y2, const mglDataA &z2, const char *pen="", const char *opt="")
{ mgl_region_3d(gr, &x1, &y1, &z1, &x2, &y2, &z2, pen, opt); }
/// Fill area (draw ribbon) between curves {x1,y1} and {x2,y2}
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x1, const mglDataA &y1, const mglDataA &x2, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region_3d(gr, &x1, &y1, NULL, &x2, &y2, NULL, pen, opt); }
/// Draw vertical lines from points {x,y,z} to axis plane
inline void Stem(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_stem_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw vertical lines from points {x,y} to axis plane
inline void Stem(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_stem_xy(gr, &x, &y, pen, opt); }
/// Draw vertical lines from points {x,y} with x in x-axis range to axis plane
inline void Stem(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_stem(gr, &y, pen, opt); }
/// Draw vertical bars from points {x,y,z} to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_bars_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw vertical bars from points {x,y} to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_bars_xy(gr, &x, &y, pen, opt); }
/// Draw vertical bars from points {x,y} with x equidistantly distributed in x-axis range to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_bars(gr, &y, pen, opt); }
/// Draw horizontal bars from points {y,v} to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Barh(const mglDataA &y, const mglDataA &v, const char *pen="", const char *opt="")
{ mgl_barh_yx(gr, &y, &v, pen, opt); }
/// Draw horizontal bars from values v with y equidistantly distributed in y-axis range to axis plane
/** String \a pen may contain:
 * ‘a’ for drawing boxes one above another (like summation);
 * ‘f’ for waterfall chart;
 * ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Barh(const mglDataA &v, const char *pen="", const char *opt="")
{ mgl_barh(gr, &v, pen, opt); }
/// Draw chart for data a
/** Space denotes transparent color. Style '#' draws black borders. */
inline void Chart(const mglDataA &a, const char *colors="", const char *opt="")
{ mgl_chart(gr, &a, colors,opt); }
/// Draw Open-High-Low-Close (OHLC) diagram
/** Different colors for up and down values are used if number of specified colors is equal to 2*number of curves. */
inline void OHLC(const mglDataA &x, const mglDataA &open, const mglDataA &high, const mglDataA &low, const mglDataA &close, const char *pen="", const char *opt="")
{ mgl_ohlc_x(gr, &x, &open,&high,&low,&close,pen,opt); }
/// Draw Open-High-Low-Close (OHLC) diagram with x equidistantly distributed in x-axis range
/** Different colors for up and down values are used if number of specified colors is equal to 2*number of curves. */
inline void OHLC(const mglDataA &open, const mglDataA &high, const mglDataA &low, const mglDataA &close, const char *pen="", const char *opt="")
{ mgl_ohlc(gr, &open,&high,&low,&close,pen,opt); }
/// Draw box-plot (special 5-value plot used in statistic)
/** String \a pen may contain ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.*/
inline void BoxPlot(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_boxplot_xy(gr, &x, &y, pen,opt); }
/// Draw box-plot (special 5-value plot used in statistic) with x equidistantly distributed in x-axis range
/** String \a pen may contain ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.*/
inline void BoxPlot(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_boxplot(gr, &y, pen,opt); }
/// Draw candle plot
/** Different colors are used for up and down values if 2 colors are specified.
 * Style ‘#’ forces drawing wire candle even for 2-color scheme. */
inline void Candle(const mglDataA &x, const mglDataA &v1, const mglDataA &v2, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle_xyv(gr, &x, &v1, &v2, &y1, &y2, pen, opt); }
/// Draw candle plot with x in x-axis range
/** Different colors are used for up and down values if 2 colors are specified.
 * Style ‘#’ forces drawing wire candle even for 2-color scheme. */
inline void Candle(const mglDataA &v1, const mglDataA &v2, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle_yv(gr, &v1, &v2, &y1, &y2, pen, opt); }
/// Draw candle plot with x in x-axis range and without shadow ranges (NULL is passed for y1, y2)
inline void Candle(const mglDataA &v1, const mglDataA &v2, const char *pen="", const char *opt="")
{ mgl_candle_yv(gr, &v1, &v2, NULL, NULL, pen, opt); }
/// Draw candle plot with v1=v[i], v2=v[i+1]
/** Different colors are used for up and down values if 2 colors are specified.
 * Style ‘#’ forces drawing wire candle even for 2-color scheme. */
inline void Candle(const mglDataA &y, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle(gr, &y, &y1, &y2, pen, opt); }
/// Draw candle plot with v1=v[i], v2=v[i+1] and without extra ranges (NULL is passed for y1, y2)
/** Different colors are used for up and down values if 2 colors are specified.
 * Style ‘#’ forces drawing wire candle even for 2-color scheme. */
inline void Candle(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_candle(gr, &y, NULL, NULL, pen, opt); }
/// Draw cones from points {x,y,z} to axis plane
/** String \a pen may contain:
 * ‘@’ for drawing edges;
 * ‘#’ for wired cones;
 * ‘t’ for drawing tubes/cylinders instead of cones/prisms;
 * ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones;
 * ‘<’, ‘^’ or ‘>’ for aligning cones left, right or centering them at its x-coordinates.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Cones(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw cones from points {x,z} to axis plane
/** String \a pen may contain:
 * ‘@’ for drawing edges;
 * ‘#’ for wired cones;
 * ‘t’ for drawing tubes/cylinders instead of cones/prisms;
 * ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones;
 * ‘<’, ‘^’ or ‘>’ for aligning cones left, right or centering them at its x-coordinates.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Cones(const mglDataA &x, const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones_xz(gr, &x, &z, pen, opt); }
/// Draw cones from points z with x equidistantly distributed in x-axis range to axis plane
/** String \a pen may contain:
 * ‘@’ for drawing edges;
 * ‘#’ for wired cones;
 * ‘t’ for drawing tubes/cylinders instead of cones/prisms;
 * ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones;
 * ‘<’, ‘^’ or ‘>’ for aligning cones left, right or centering them at its x-coordinates.
 * Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Cones(const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones(gr, &z, pen, opt); }
/// Draw error boxes {ey} at points {x,y} with x equidistantly distributed in x-axis range
/** Style ‘@’ set to draw large semitransparent mark instead of error box.*/
inline void Error(const mglDataA &y, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error(gr, &y, &ey, pen, opt); }
/// Draw error boxes {ey} at points {x,y}
/** Style ‘@’ set to draw large semitransparent mark instead of error box.*/
inline void Error(const mglDataA &x, const mglDataA &y, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error_xy(gr, &x, &y, &ey, pen, opt); }
/// Draw error boxes {ex,ey} at points {x,y}
/** Style ‘@’ set to draw large semitransparent mark instead of error box.*/
inline void Error(const mglDataA &x, const mglDataA &y, const mglDataA &ex, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error_exy(gr, &x, &y, &ex, &ey, pen, opt); }
/// Draw marks with size r at points {x,y,z}
/** Note: \a pen has no default here — a mark style must be supplied. */
inline void Mark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_xyz(gr, &x, &y, &z, &r, pen, opt); }
/// Draw marks with size r at points {x,y}
inline void Mark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_xy(gr, &x, &y, &r, pen, opt); }
/// Draw marks with size r at points {x,y} with x equidistantly distributed in x-axis range
inline void Mark(const mglDataA &y, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_y(gr, &y, &r, pen, opt); }
/// Draw Poincare map at condition s==0 for curve {x,y,z}
/** Note: \a pen has no default here — a mark style must be supplied. */
inline void Pmap(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap_xyz(gr, &x, &y, &z, &s, pen, opt); }
/// Draw Poincare map at condition s==0 for curve {x,y}
inline void Pmap(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap_xy(gr, &x, &y, &s, pen, opt); }
/// Draw Poincare map at condition s==0 for curve {x,y} with x equidistantly distributed in x-axis range
inline void Pmap(const mglDataA &y, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap(gr, &y, &s, pen, opt); }
/// Draw textual marks with size r at points {x,y,z}
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_xyzr(gr, &x, &y, &z, &r, text, fnt, opt); }
/// Draw textual marks with size r at points {x,y}
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_xyr(gr, &x, &y, &r, text, fnt, opt); }
/// Draw textual marks with size r at points {x,y} with x in x-axis range
inline void TextMark(const mglDataA &y, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_yr(gr, &y, &r, text, fnt, opt); }
/// Draw textual marks at points {x,y} with x in x-axis range
inline void TextMark(const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark(gr, &y, text, fnt, opt); }
/// Draw textual marks with size r at points {x,y,z} (wide-character text variant)
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_xyzr(gr, &x, &y, &z, &r, text, fnt, opt); }
/// Draw textual marks with size r at points {x,y} (wide-character text variant)
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_xyr(gr, &x, &y, &r, text, fnt, opt); }
/// Draw textual marks with size r at points {x,y} with x in x-axis range (wide-character text variant)
inline void TextMark(const mglDataA &y, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_yr(gr, &y, &r, text, fnt, opt); }
/// Draw textual marks at points {x,y} with x in x-axis range (wide-character text variant)
inline void TextMark(const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw(gr, &y, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y,z}
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_xyz(gr, &x, &y, &z, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y}
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &x, const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_xy(gr, &x, &y, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y} with x in x-axis range
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_y(gr, &y, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y,z} (wide-character text variant)
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &x, const mglDataA &y, const mglDataA &z, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_xyz(gr, &x, &y, &z, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y} (wide-character text variant)
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &x, const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_xy(gr, &x, &y, text, fnt, opt); }
/// Draw labels for points coordinate(s) at points {x,y} with x in x-axis range (wide-character text variant)
/** String \a fnt may contain:
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.*/
inline void Label(const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_y(gr, &y, text, fnt, opt); }
/// Draw table for values val along given direction with row labels text
/** String \a fnt may contain:
 * ‘#’ for drawing cell borders;
 * ‘|’ for limiting table width by subplot one (equal to option ‘value 1’);
 * ‘=’ for equal width of all cells;
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.
 * Option value set the width of the table (default is 1).*/
inline void Table(const mglDataA &val, const char *text, const char *fnt="#|", const char *opt="")
{ mgl_table(gr, 0, 0, &val, text, fnt, opt); }
/// Draw table for values val along given direction with row labels text (wide-character text variant)
/** String \a fnt may contain:
 * ‘#’ for drawing cell borders;
 * ‘|’ for limiting table width by subplot one (equal to option ‘value 1’);
 * ‘=’ for equal width of all cells;
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.
 * Option value set the width of the table (default is 1).*/
inline void Table(const mglDataA &val, const wchar_t *text, const char *fnt="#|", const char *opt="")
{ mgl_tablew(gr, 0, 0, &val, text, fnt, opt); }
/// Draw table for values val along given direction with row labels text at given position
/** String \a fnt may contain:
 * ‘#’ for drawing cell borders;
 * ‘|’ for limiting table width by subplot one (equal to option ‘value 1’);
 * ‘=’ for equal width of all cells;
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.
 * Option value set the width of the table (default is 1).*/
inline void Table(double x, double y, const mglDataA &val, const char *text, const char *fnt="#|", const char *opt="")
{ mgl_table(gr, x, y, &val, text, fnt, opt); }
/// Draw table for values val along given direction with row labels text at given position (wide-character text variant)
/** String \a fnt may contain:
 * ‘#’ for drawing cell borders;
 * ‘|’ for limiting table width by subplot one (equal to option ‘value 1’);
 * ‘=’ for equal width of all cells;
 * ‘f’ for fixed format of printed numbers;
 * ‘E’ for using ‘E’ instead of ‘e’;
 * ‘F’ for printing in LaTeX format;
 * ‘+’ for printing ‘+’ for positive numbers;
 * ‘-’ for printing usual ‘-’;
 * ‘0123456789’ for precision at printing numbers.
 * Option value set the width of the table (default is 1).*/
inline void Table(double x, double y, const mglDataA &val, const wchar_t *text, const char *fnt="#|", const char *opt="")
{ mgl_tablew(gr, x, y, &val, text, fnt, opt); }
/// Draw tube with variable radius r around curve {x,y,z}
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_xyzr(gr, &x, &y, &z, &r, pen, opt); }
/// Draw tube with constant radius r around curve {x,y,z}
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &z, double r, const char *pen="", const char *opt="")
{ mgl_tube_xyz(gr, &x, &y, &z, r, pen, opt); }
/// Draw tube with variable radius r around curve {x,y}
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_xyr(gr, &x, &y, &r, pen, opt); }
/// Draw tube with constant radius r around curve {x,y}
inline void Tube(const mglDataA &x, const mglDataA &y, double r, const char *pen="", const char *opt="")
{ mgl_tube_xy(gr, &x, &y, r, pen, opt); }
/// Draw tube with variable radius r around curve {x,y} with x equidistantly distributed in x-axis range
inline void Tube(const mglDataA &y, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_r(gr, &y, &r, pen, opt); }
/// Draw tube with constant radius r around curve {x,y} with x equidistantly distributed in x-axis range
inline void Tube(const mglDataA &y, double r, const char *pen="", const char *opt="")
{ mgl_tube(gr, &y, r, pen, opt); }
/// Draw surface obtained by rotation of curve {r,z} around axis
/** Style ‘#’ produce wire plot. Style ‘.’ produce plot by dots.*/
inline void Torus(const mglDataA &r, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_torus(gr, &r, &z, pen,opt); }
/// Draw mesh lines for 2d data specified parametrically
inline void Mesh(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_mesh_xy(gr, &x, &y, &z, stl, opt); }
/// Draw mesh lines for 2d data (x, y equidistantly distributed in axis ranges)
inline void Mesh(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_mesh(gr, &z, stl, opt); }
/// Draw waterfall plot for 2d data specified parametrically
/** Style 'x' draw lines in x-direction. */
inline void Fall(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_fall_xy(gr, &x, &y, &z, stl, opt); }
/// Draw waterfall plot for 2d data (x, y equidistantly distributed in axis ranges)
/** Style 'x' draw lines in x-direction. */
inline void Fall(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_fall(gr, &z, stl, opt); }
/// Draw belts for 2d data specified parametrically
/** Style 'x' draw belts in x-direction. */
inline void Belt(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_belt_xy(gr, &x, &y, &z, stl, opt); }
/// Draw belts for 2d data (x, y equidistantly distributed in axis ranges)
/** Style 'x' draw belts in x-direction. */
inline void Belt(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_belt(gr, &z, stl, opt); }
/// Draw surface for 2d data specified parametrically with color proportional to z
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void Surf(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_surf_xy(gr, &x, &y, &z, stl, opt); }
/// Draw surface for 2d data with color proportional to z (x, y equidistantly distributed in axis ranges)
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void Surf(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_surf(gr, &z, stl, opt); }
/// Draw grid lines for density plot of 2d data specified parametrically
inline void Grid(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_grid_xy(gr, &x, &y, &z, stl, opt); }
/// Draw grid lines for density plot of 2d data (x, y equidistantly distributed in axis ranges)
inline void Grid(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_grid(gr, &z, stl, opt); }
/// Draw vertical tiles for 2d data specified parametrically
inline void Tile(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_tile_xy(gr, &x, &y, &z, stl, opt); }
/// Draw vertical tiles for 2d data (x, y equidistantly distributed in axis ranges)
inline void Tile(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_tile(gr, &z, stl, opt); }
/// Draw density plot for 2d data specified parametrically
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void Dens(const mglDataA &x, const mglDataA &y, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_dens_xy(gr, &x, &y, &c, stl, opt); }
/// Draw density plot for 2d data (x, y equidistantly distributed in axis ranges)
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void Dens(const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_dens(gr, &c, stl, opt); }
/// Draw vertical boxes for 2d data specified parametrically
/** Style ‘#’ draw filled boxes. */
inline void Boxs(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_boxs_xy(gr, &x, &y, &z, stl, opt); }
/// Draw vertical boxes for 2d data (x, y equidistantly distributed in axis ranges)
/** Style ‘#’ draw filled boxes. */
inline void Boxs(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_boxs(gr, &z, stl, opt); }
/// Draw contour lines at manual levels v for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.*/
inline void Cont(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw contour lines at manual levels v for 2d data
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.*/
inline void Cont(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_val(gr, &v, &z, sch, opt); }
/// Draw contour lines for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void Cont(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_xy(gr, &x, &y, &z, sch, opt); }
/// Draw contour lines for 2d data
/** Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void Cont(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont(gr, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContF(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2d data
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContF(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_val(gr, &v, &z, sch, opt); }
/// Draw solid contours for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContF(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_xy(gr, &x, &y, &z, sch, opt); }
/// Draw solid contours for 2d data
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContF(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf(gr, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2d data specified parametrically with specified colors
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContD(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2d data with specified colors
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContD(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_val(gr, &v, &z, sch, opt); }
/// Draw solid contours for 2d data specified parametrically with specified colors
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContD(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_xy(gr, &x, &y, &z, sch, opt); }
/// Draw solid contours for 2d data with specified colors
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContD(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd(gr, &z, sch, opt); }
/// Draw contour tubes between manual levels v for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContV(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw contour tubes between manual levels v for 2d data
/** Style ‘_’ to draw contours at bottom of axis box. */
inline void ContV(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_val(gr, &v, &z, sch, opt); }
/// Draw contour tubes for 2d data specified parametrically
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContV(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_xy(gr, &x, &y, &z, sch, opt); }
/// Draw contour tubes for 2d data
/** Style ‘_’ to draw contours at bottom of axis box.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContV(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv(gr, &z, sch, opt); }
/// Draw axial-symmetric isosurfaces at manual levels v for 2d data specified parametrically
/** String \a sch may contain:
 * ‘#’ for wired plot;
 * ‘.’ for plot by dots;
 * ‘x’, ‘z’ for rotation around x-, z-axis correspondingly (default is y-axis). */
inline void Axial(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_xy_val(gr, &v, &x, &y, &z, sch,opt); }
/// Draw axial-symmetric isosurfaces at manual levels v for 2d data
/** String \a sch may contain:
 * ‘#’ for wired plot;
 * ‘.’ for plot by dots;
 * ‘x’, ‘z’ for rotation around x-, z-axis correspondingly (default is y-axis). */
inline void Axial(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_val(gr, &v, &z, sch, opt); }
/// Draw axial-symmetric isosurfaces for 2d data specified parametrically
/** String \a sch may contain:
 * ‘#’ for wired plot;
 * ‘.’ for plot by dots;
 * ‘x’, ‘z’ for rotation around x-, z-axis correspondingly (default is y-axis).
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Axial(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_xy(gr, &x, &y, &z, sch, opt); }
/// Draw axial-symmetric isosurfaces for 2d data
/** String \a sch may contain:
 * ‘#’ for wired plot;
 * ‘.’ for plot by dots;
 * ‘x’, ‘z’ for rotation around x-, z-axis correspondingly (default is y-axis).
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Axial(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial(gr, &z, sch, opt); }
/// Draw grid lines for density plot at slice for 3d data specified parametrically
/** Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * NOTE(review): sVal selects the slice position; -1 appears to request the default
 * (middle) slice — confirm against MathGL documentation for mgl_grid3_xyz. */
inline void Grid3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_grid3_xyz(gr, &x, &y, &z, &a, stl, sVal, opt); }
/// Draw grid lines for density plot at slice for 3d data
/** Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.*/
inline void Grid3(const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_grid3(gr, &a, stl, sVal, opt); }
/// Draw density plot at slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * NOTE(review): sVal selects the slice position; -1 appears to mean the default slice — confirm with MathGL docs. */
inline void Dens3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_dens3_xyz(gr, &x, &y, &z, &a, stl, sVal, opt); }
/// Draw density plot at slice for 3d data
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.*/
inline void Dens3(const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_dens3(gr, &a, stl, sVal, opt); }
/// Draw isosurface at level Val for 3d data specified parametrically
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.*/
inline void Surf3(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_xyz_val(gr, Val, &x, &y, &z, &a, stl, opt); }
/// Draw isosurface at level Val for 3d data
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.*/
inline void Surf3(double Val, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_val(gr, Val, &a, stl, opt); }
/// Draw isosurfaces for 3d data specified parametrically
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_xyz(gr, &x, &y, &z, &a, stl, opt); }
/// Draw isosurfaces for 3d data
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3(const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3(gr, &a, stl, opt); }
/// Draw a semi-transparent cloud for 3d data specified parametrically
/** Style ‘.’ produces plot by dots. Style ‘i’ uses inverted values for transparency. */
inline void Cloud(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_cloud_xyz(gr, &x, &y, &z, &a, stl, opt); }
/// Draw a semi-transparent cloud for 3d data
/** Style ‘.’ produces plot by dots. Style ‘i’ uses inverted values for transparency. */
inline void Cloud(const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_cloud(gr, &a, stl, opt); }
/// Draw contour lines at manual levels v along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines.
 * Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * NOTE(review): sVal selects the slice position; -1 appears to mean the default slice — confirm with MathGL docs. */
inline void Cont3(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_xyz_val(gr, &v, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw contour lines at manual levels v along slice for 3d data
/** Style ‘#’ draw grid lines.
 * Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void Cont3(const mglDataA &v, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_val(gr, &v, &a, sch, sVal, opt); }
/// Draw contour lines along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines.
 * Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void Cont3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_xyz(gr, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw contour lines along slice for 3d data
/** Style ‘#’ draw grid lines.
 * Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void Cont3(const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3(gr, &a, sch, sVal, opt); }
/// Draw filled contours at manually given levels v along a slice of 3d data on a parametric grid {x,y,z}
/** Style ‘#’ draws grid lines. Style ‘x’ or ‘z’ makes the slice perpendicular to the x- or z-direction. */
inline void ContF3(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{
	mgl_contf3_xyz_val(gr, &v, &x, &y, &z, &a, sch, sVal, opt);
}
/// Draw filled contours at manually given levels v along a slice of 3d data
/** Style ‘#’ draws grid lines. Style ‘x’ or ‘z’ makes the slice perpendicular to the x- or z-direction. */
inline void ContF3(const mglDataA &v, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{
	mgl_contf3_val(gr, &v, &a, sch, sVal, opt);
}
/// Draw automatically-leveled filled contours along a slice of 3d data on a parametric grid {x,y,z}
/** Style ‘#’ draws grid lines. Style ‘x’ or ‘z’ makes the slice perpendicular to the x- or z-direction.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContF3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{
	mgl_contf3_xyz(gr, &x, &y, &z, &a, sch, sVal, opt);
}
/// Draw automatically-leveled filled contours along a slice of 3d data
/** Style ‘#’ draws grid lines. Style ‘x’ or ‘z’ makes the slice perpendicular to the x- or z-direction.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContF3(const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{
	mgl_contf3(gr, &a, sch, sVal, opt);
}
/// Draw num isosurfaces of a 3d beam in curvilinear coordinates (trajectory tr, transverse vectors g1, g2)
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots.
 * Bits of \a flag:
 * ‘0x1’ - use accompanied (not laboratory) coordinates;
 * ‘0x2’ - draw projection to the \rho-z plane;
 * ‘0x4’ - normalize the field in each slice. */
inline void Beam(const mglDataA &tr, const mglDataA &g1, const mglDataA &g2, const mglDataA &a, double r, const char *stl=0, int flag=0, int num=3)
{
	mgl_beam(gr, &tr, &g1, &g2, &a, r, stl, flag, num);
}
/// Draw the isosurface at level \a val of a 3d beam in curvilinear coordinates
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots.
 * Bits of \a flag:
 * ‘0x1’ - use accompanied (not laboratory) coordinates;
 * ‘0x2’ - draw projection to the \rho-z plane;
 * ‘0x4’ - normalize the field in each slice. */
inline void Beam(double val, const mglDataA &tr, const mglDataA &g1, const mglDataA &g2, const mglDataA &a, double r, const char *stl=NULL, int flag=0)
{
	mgl_beam_val(gr, val, &tr, &g1, &g2, &a, r, stl, flag);
}
/// Draw vertical tiles whose size is scaled by r for 2d data on a parametric grid {x,y}
inline void TileS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *stl="", const char *opt="")
{
	mgl_tiles_xy(gr, &x, &y, &z, &r, stl, opt);
}
/// Draw vertical tiles whose size is scaled by r for 2d data
inline void TileS(const mglDataA &z, const mglDataA &r, const char *stl="", const char *opt="")
{
	mgl_tiles(gr, &z, &r, stl, opt);
}
/// Draw a surface for 2d data on a parametric grid {x,y}, colored by the values of c
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void SurfC(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{
	mgl_surfc_xy(gr, &x, &y, &z, &c, sch, opt);
}
/// Draw a surface for 2d data, colored by the values of c
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void SurfC(const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{
	mgl_surfc(gr, &z, &c, sch, opt);
}
/// Draw a surface for 2d data on a parametric grid {x,y}, with transparency taken from c
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void SurfA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{
	mgl_surfa_xy(gr, &x, &y, &z, &c, sch, opt);
}
/// Draw a surface for 2d data, with transparency taken from c
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void SurfA(const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{
	mgl_surfa(gr, &z, &c, sch, opt);
}
/// Draw a surface for 2d data on a parametric grid {x,y}, colored by c and with transparency taken from a
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void SurfCA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{
	mgl_surfca_xy(gr, &x, &y, &z, &c, &a, sch, opt);
}
/// Draw a surface for 2d data, colored by c and with transparency taken from a
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void SurfCA(const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{
	mgl_surfca(gr, &z, &c, &a, sch, opt);
}
/// Draw the color map from matrix a to matrix b, both given on a parametric grid {x,y}
/** Style ‘.’ draws by dots. */
inline void Map(const mglDataA &x, const mglDataA &y, const mglDataA &a, const mglDataA &b, const char *sch="", const char *opt="")
{
	mgl_map_xy(gr, &x, &y, &a, &b, sch, opt);
}
/// Draw the color map from matrix a to matrix b
/** Style ‘.’ draws by dots. */
inline void Map(const mglDataA &a, const mglDataA &b, const char *sch="", const char *opt="")
{
	mgl_map(gr, &a, &b, sch, opt);
}
/// Draw the spectrogram density plot of signal {re,im} (window size dn) on a parametric grid {x,y}
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void STFA(const mglDataA &x, const mglDataA &y, const mglDataA &re, const mglDataA &im, int dn, const char *sch="", const char *opt="")
{
	mgl_stfa_xy(gr, &x, &y, &re, &im, dn, sch, opt);
}
/// Draw the spectrogram density plot of signal {re,im} (window size dn)
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void STFA(const mglDataA &re, const mglDataA &im, int dn, const char *sch="", const char *opt="")
{
	mgl_stfa(gr, &re, &im, dn, sch, opt);
}
/// Draw the isosurface of 3d data at level Val on a parametric grid {x,y,z}, with transparency taken from b
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots. */
inline void Surf3A(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{
	mgl_surf3a_xyz_val(gr, Val, &x, &y, &z, &a, &b, stl, opt);
}
/// Draw the isosurface of 3d data at level Val, with transparency taken from b
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots. */
inline void Surf3A(double Val, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{
	mgl_surf3a_val(gr, Val, &a, &b, stl, opt);
}
/// Draw automatically-leveled isosurfaces of 3d data on a parametric grid {x,y,z}, with transparency taken from b
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots.
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Surf3A(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{
	mgl_surf3a_xyz(gr, &x, &y, &z, &a, &b, stl, opt);
}
/// Draw automatically-leveled isosurfaces of 3d data, with transparency taken from b
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots.
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Surf3A(const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{
	mgl_surf3a(gr, &a, &b, stl, opt);
}
/// Draw the isosurface of 3d data at level Val on a parametric grid {x,y,z}, colored by the values of c
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots. */
inline void Surf3C(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{
	mgl_surf3c_xyz_val(gr, Val, &x, &y, &z, &a, &c, stl, opt);
}
/// Draw the isosurface of 3d data at level Val, colored by the values of c
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots. */
inline void Surf3C(double Val, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{
	mgl_surf3c_val(gr, Val, &a, &c, stl, opt);
}
/// Draw automatically-leveled isosurfaces of 3d data on a parametric grid {x,y,z}, colored by the values of c
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots.
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Surf3C(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{
	mgl_surf3c_xyz(gr, &x, &y, &z, &a, &c, stl, opt);
}
/// Draw automatically-leveled isosurfaces of 3d data, colored by the values of c
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots.
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Surf3C(const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{
	mgl_surf3c(gr, &a, &c, stl, opt);
}
/// Draw the isosurface of 3d data at level Val on a parametric grid {x,y,z}, colored by c with transparency from b
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots. */
inline void Surf3CA(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{
	mgl_surf3ca_xyz_val(gr, Val, &x, &y, &z, &a, &c, &b, stl, opt);
}
/// Draw the isosurface of 3d data at level Val, colored by c with transparency from b
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots. */
inline void Surf3CA(double Val, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{
	mgl_surf3ca_val(gr, Val, &a, &c, &b, stl, opt);
}
/// Draw automatically-leveled isosurfaces of 3d data on a parametric grid {x,y,z}, colored by c with transparency from b
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots.
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Surf3CA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{
	mgl_surf3ca_xyz(gr, &x, &y, &z, &a, &c, &b, stl, opt);
}
/// Draw automatically-leveled isosurfaces of 3d data, colored by c with transparency from b
/** Style ‘#’ draws a wired plot. Style ‘.’ draws by dots.
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Surf3CA(const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{
	mgl_surf3ca(gr, &a, &c, &b, stl, opt);
}
/// Plot dew drops for the vector field {ax,ay} given on a parametric grid {x,y}
inline void Dew(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{
	mgl_dew_xy(gr, &x, &y, &ax, &ay, sch, opt);
}
/// Plot dew drops for the vector field {ax,ay}
inline void Dew(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{
	mgl_dew_2d(gr, &ax, &ay, sch, opt);
}
/// Plot vectors {ax,ay} anchored at points {x,y}, with length/color proportional to |a|
/** Option "value" sets the vector length factor (if non-zero), or makes the length proportional to the distance between curve points (if value=0). */
inline void Traj(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{
	mgl_traj_xy(gr, &x, &y, &ax, &ay, sch, opt);
}
/// Plot vectors {ax,ay,az} anchored at points {x,y,z}, with length/color proportional to |a|
/** Option "value" sets the vector length factor (if non-zero), or makes the length proportional to the distance between curve points (if value=0). */
inline void Traj(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{
	mgl_traj_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt);
}
/// Plot vector field {ax,ay} parametrically depended on coordinate {x,y} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{	mgl_vect_xy(gr, &x, &y, &ax, &ay, sch, opt);	}
/// Plot vector field {ax,ay} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{	mgl_vect_2d(gr, &ax, &ay, sch, opt);	}
/// Plot vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{	mgl_vect_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt);	}
/// Plot vector field {ax,ay,az} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{	mgl_vect_3d(gr, &ax, &ay, &az, sch, opt);	}
/// Draw vector plot along slice for 3d data specified parametrically
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows,
 * ‘x’, ‘z’ for producing plot perpendicular to x- or z-direction correspondingly. */
inline void Vect3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *stl="", double sVal=-1, const char *opt="")
{	mgl_vect3_xyz(gr, &x, &y, &z, &ax,&ay,&az, stl, sVal, opt);	}
/// Draw vector plot along slice for 3d data
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows,
 * ‘x’, ‘z’ for producing plot perpendicular to x- or z-direction correspondingly. */
inline void Vect3(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *stl="", double sVal=-1, const char *opt="")
{	mgl_vect3(gr, &ax,&ay,&az, stl, sVal, opt);	}
/// Plot flow threads of the vector field {ax,ay} on a parametric grid {x,y}, colored by |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{
	mgl_flow_xy(gr, &x, &y, &ax, &ay, sch, opt);
}
/// Plot flow threads of the vector field {ax,ay}, colored by |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{
	mgl_flow_2d(gr, &ax, &ay, sch, opt);
}
/// Plot flow threads of the vector field {ax,ay,az} on a parametric grid {x,y,z}, colored by |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{
	mgl_flow_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt);
}
/// Plot flow threads of the vector field {ax,ay,az}, colored by |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{
	mgl_flow_3d(gr, &ax, &ay, &az, sch, opt);
}
/// Plot a single flow thread started at point p for the vector field {ax,ay} on a parametric grid {x,y}, colored by |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads. */
inline void FlowP(mglPoint p, const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{
	mgl_flowp_xy(gr, p.x, p.y, p.z, &x, &y, &ax, &ay, sch, opt);
}
/// Plot a single flow thread started at point p for the vector field {ax,ay}, colored by |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads. */
inline void FlowP(mglPoint p, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{
	mgl_flowp_2d(gr, p.x, p.y, p.z, &ax, &ay, sch, opt);
}
/// Plot a single flow thread started at point p for the vector field {ax,ay,az} on a parametric grid {x,y,z}, colored by |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly. */
inline void FlowP(mglPoint p, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{
	mgl_flowp_xyz(gr, p.x, p.y, p.z, &x, &y, &z, &ax, &ay, &az, sch, opt);
}
/// Plot a single flow thread started at point p for the vector field {ax,ay,az}, colored by |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly. */
inline void FlowP(mglPoint p, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{
	mgl_flowp_3d(gr, p.x, p.y, p.z, &ax, &ay, &az, sch, opt);
}
/// Plot flow threads along the gradient of scalar field phi given on a parametric grid {x,y,z}
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Grad(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &phi, const char *sch="", const char *opt="")
{
	mgl_grad_xyz(gr, &x, &y, &z, &phi, sch, opt);
}
/// Plot flow threads along the gradient of scalar field phi given on a parametric grid {x,y}
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Grad(const mglDataA &x, const mglDataA &y, const mglDataA &phi, const char *sch="", const char *opt="")
{
	mgl_grad_xy(gr, &x, &y, &phi, sch, opt);
}
/// Plot flow threads along the gradient of scalar field phi
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘v’ to draw arrows on the threads;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Grad(const mglDataA &phi, const char *sch="", const char *opt="")
{
	mgl_grad(gr, &phi, sch, opt);
}
/// Plot flow pipes of the vector field {ax,ay} on a parametric grid {x,y}, with color and radius proportional to |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘i’ to make the pipe radius inversely proportional to the amplitude.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", double r0=0.05, const char *opt="")
{
	mgl_pipe_xy(gr, &x, &y, &ax, &ay, sch, r0, opt);
}
/// Plot flow pipes of the vector field {ax,ay}, with color and radius proportional to |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘i’ to make the pipe radius inversely proportional to the amplitude.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &ax, const mglDataA &ay, const char *sch="", double r0=0.05, const char *opt="")
{
	mgl_pipe_2d(gr, &ax, &ay, sch, r0, opt);
}
/// Plot flow pipes of the vector field {ax,ay,az} on a parametric grid {x,y,z}, with color and radius proportional to |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘i’ to make the pipe radius inversely proportional to the amplitude;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double r0=0.05, const char *opt="")
{
	mgl_pipe_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, r0, opt);
}
/// Plot flow pipes of the vector field {ax,ay,az}, with color and radius proportional to |a|
/** String \a sch may contain:
 * a color scheme: the upper (warm) half marks normal flow (like attractor), the lower (cold) half marks inverse flow (like source);
 * ‘#’ to start threads from edges only;
 * ‘i’ to make the pipe radius inversely proportional to the amplitude;
 * ‘x’, ‘z’ to draw tapes of normals in the x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double r0=0.05, const char *opt="")
{
	mgl_pipe_3d(gr, &ax, &ay, &az, sch, r0, opt);
}
/// Draw the density plot of a slice of the data at x = sVal
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void DensX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_dens_x(gr, &a, stl, sVal, opt);
}
/// Draw the density plot of a slice of the data at y = sVal
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void DensY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_dens_y(gr, &a, stl, sVal, opt);
}
/// Draw the density plot of a slice of the data at z = sVal
/** Style ‘#’ draws grid lines. Style ‘.’ draws by dots. */
inline void DensZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_dens_z(gr, &a, stl, sVal, opt);
}
/// Draw automatically-leveled contour lines of the data slice at x = sVal
/** Style ‘t’/‘T’ draws contour labels below/above the contours.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_cont_x(gr, &a, stl, sVal, opt);
}
/// Draw contour lines at manually given levels v of the data slice at x = sVal
/** Style ‘t’/‘T’ draws contour labels below/above the contours. */
inline void ContX(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_cont_x_val(gr, &v, &a, stl, sVal, opt);
}
/// Draw automatically-leveled contour lines of the data slice at y = sVal
/** Style ‘t’/‘T’ draws contour labels below/above the contours.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_cont_y(gr, &a, stl, sVal, opt);
}
/// Draw contour lines at manually given levels v of the data slice at y = sVal
/** Style ‘t’/‘T’ draws contour labels below/above the contours. */
inline void ContY(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_cont_y_val(gr, &v, &a, stl, sVal, opt);
}
/// Draw automatically-leveled contour lines of the data slice at z = sVal
/** Style ‘t’/‘T’ draws contour labels below/above the contours.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_cont_z(gr, &a, stl, sVal, opt);
}
/// Draw contour lines at manually given levels v of the data slice at z = sVal
/** Style ‘t’/‘T’ draws contour labels below/above the contours. */
inline void ContZ(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_cont_z_val(gr, &v, &a, stl, sVal, opt);
}
/// Draw automatically-leveled filled contours of the data slice at x = sVal
/** Option "value" sets the number of contour levels (default is 7). */
inline void ContFX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_contf_x(gr, &a, stl, sVal, opt);
}
/// Draw filled contours at manually given levels v of the data slice at x = sVal
inline void ContFX(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_contf_x_val(gr, &v, &a, stl, sVal, opt);
}
/// Draw automatically-leveled filled contours of the data slice at y = sVal
/** Option "value" sets the number of contour levels (default is 7). */
inline void ContFY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_contf_y(gr, &a, stl, sVal, opt);
}
/// Draw filled contours at manually given levels v of the data slice at y = sVal
inline void ContFY(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_contf_y_val(gr, &v, &a, stl, sVal, opt);
}
/// Draw automatically-leveled filled contours of the data slice at z = sVal
/** Option "value" sets the number of contour levels (default is 7). */
inline void ContFZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_contf_z(gr, &a, stl, sVal, opt);
}
/// Draw filled contours at manually given levels v of the data slice at z = sVal
inline void ContFZ(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{
	mgl_contf_z_val(gr, &v, &a, stl, sVal, opt);
}
/// Draw the curve y = fy(x) for x over the x-axis range
/** Option "value" sets the initial number of points. */
inline void FPlot(const char *fy, const char *stl="", const char *opt="")
{
	mgl_fplot(gr, fy, stl, opt);
}
/// Draw the curve {fx(t), fy(t), fz(t)} for parameter t in [0,1]
/** Option "value" sets the initial number of points. */
inline void FPlot(const char *fx, const char *fy, const char *fz, const char *stl, const char *opt="")
{
	mgl_fplot_xyz(gr, fx, fy, fz, stl, opt);
}
/// Draw the surface z = fz(x,y) for x,y over the axis ranges
/** Option "value" sets the initial number of points. */
inline void FSurf(const char *fz, const char *stl="", const char *opt="")
{
	mgl_fsurf(gr, fz, stl, opt);
}
/// Draw the surface {fx(u,v), fy(u,v), fz(u,v)} for parameters u,v in [0,1]
/** Option "value" sets the initial number of points. */
inline void FSurf(const char *fx, const char *fy, const char *fz, const char *stl, const char *opt="")
{
	mgl_fsurf_xyz(gr, fx, fy, fz, stl, opt);
}
/// Draw the triangle mesh nums over points {x,y,z} with colors taken from c
/** Style ‘#’ produces a wire plot. If id.ny=c.nx then c sets the triangle colors, else the vertex colors. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{
	mgl_triplot_xyzc(gr, &nums, &x, &y, &z, &c, sch, opt);
}
/// Draw the triangle mesh nums over points {x,y,z}
/** Style ‘#’ produces a wire plot. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{
	mgl_triplot_xyz(gr, &nums, &x, &y, &z, sch, opt);
}
/// Draw the triangle mesh nums over points {x,y}
/** Style ‘#’ produces a wire plot. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const char *sch="", const char *opt="")
{
	mgl_triplot_xy(gr, &nums, &x, &y, sch, opt);
}
/// Draw the quadrangle mesh nums over points {x,y,z} with colors taken from c
/** Style ‘#’ produces a wire plot. If id.ny=c.nx then c sets the quadrangle colors, else the vertex colors. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{
	mgl_quadplot_xyzc(gr, &nums, &x, &y, &z, &c, sch, opt);
}
/// Draw the quadrangle mesh nums over points {x,y,z}
/** Style ‘#’ produces a wire plot. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{
	mgl_quadplot_xyz(gr, &nums, &x, &y, &z, sch, opt);
}
/// Draw the quadrangle mesh nums over points {x,y}
/** Style ‘#’ produces a wire plot. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const char *sch="", const char *opt="")
{
	mgl_quadplot_xy(gr, &nums, &x, &y, sch, opt);
}
/// Draw contour lines for triangle mesh for points in arrays {x,y,z}
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriCont(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricont_xyc(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z}
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors.
* Option "value" set the number of contour levels (default is 7). */
inline void TriContV(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricont_xycv(gr, &v, &nums, &x, &y, &z, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriCont(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricont_xyzc(gr, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriContV(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricont_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Alias of TriContV() with explicit contour levels v (same arguments, kept for interface symmetry).
 * Style ‘_’ to draw contours at bottom of axis box.
 * Style ‘t’/‘T’ draw contour labels below/above contours.
 * If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriCont(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{	mgl_tricont_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt);	}
/// Draw contour tubes for triangle mesh for points in arrays {x,y,z}
/** Option "value" set the number of contour levels (default is 7). */
inline void TriContVt(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricontv_xyc(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw contour tubes for triangle mesh for points in arrays {x,y,z} with specified color c
/** Option "value" set the number of contour levels (default is 7). */
inline void TriContVt(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricontv_xyzc(gr, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour tubes for triangle mesh for points in arrays {x,y,z} with specified color c
/** If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriContVt(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricontv_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw dots in points {x,y,z}.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_dots(gr, &x, &y, &z, sch, opt); }
/// Draw semitransparent dots in points {x,y,z} with specified alpha a.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_dots_a(gr, &x, &y, &z, &a, sch, opt); }
/// Draw semitransparent dots in points {x,y,z} with specified color c and alpha a.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_dots_ca(gr, &x, &y, &z, &c, &a, sch, opt); }
/// Draw surface reconstructed for points in arrays {x,y,z}.
/** Style ‘#’ produce wired plot. */
inline void Crust(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_crust(gr, &x, &y, &z, sch, opt); }
/// Fit data along x-direction for each data row. Return array with values for found formula.
inline mglData Fit(const mglDataA &y, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_1(gr, &y, eq,vars,0, opt)); }
/// Fit data along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &y, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_1(gr, &y, eq, vars, &ini, opt)); }
/// Fit data along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData Fit2(const mglDataA &z, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_2(gr, &z, eq, vars,0, opt)); }
/// Fit data along x-, y-direction for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData Fit2(const mglDataA &z, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_2(gr, &z, eq, vars, &ini, opt)); }
/// Fit data along along all directions. Return array with values for found formula.
inline mglData Fit3(const mglDataA &a, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_3(gr, &a, eq, vars,0, opt)); }
/// Fit data along all directions starting from \a ini values. Return array with values for found formula.
inline mglData Fit3(const mglDataA &a, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_3(gr, &a, eq, vars, &ini, opt)); }
/// Fit data along x-direction for each data row. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xy(gr, &x, &y, eq, vars,0, opt)); }
/// Fit data along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xy(gr, &x, &y, eq, vars, &ini, opt)); }
/// Fit data along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyz(gr, &x, &y, &z, eq, vars,0, opt)); }
/// Fit data along x-, y-directions for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyz(gr, &x, &y, &z, eq, vars, &ini, opt)); }
/// Fit data along along all directions. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyza(gr, &x, &y, &z, &a, eq, vars,0, opt)); }
/// Fit data along along all directions starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyza(gr, &x, &y, &z, &a, eq,vars, &ini, opt)); }
/// Fit data with dispersion s along x-direction for each data row. Return array with values for found formula.
inline mglData FitS(const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_ys(gr, &y, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_ys(gr, &y, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along x-direction for each data row. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xys(gr, &x, &y, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xys(gr, &x, &y, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyzs(gr, &x, &y, &z, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-, y-directions for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyzs(gr, &x, &y, &z, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along all directions. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyzas(gr, &x, &y, &z, &a, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along all directions starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyzas(gr, &x, &y, &z, &a, &s, eq, vars, &ini, opt)); }
/// Print fitted last formula (with coefficients)
inline void PutsFit(mglPoint p, const char *prefix=0, const char *font="", double size=-1)
{ mgl_puts_fit(gr, p.x, p.y, p.z, prefix, font, size); }
/// Get last fitted formula
inline const char *GetFit() const
{ return mgl_get_fit(gr); }
/// Get chi for last fitted formula
static inline mreal GetFitChi()
{ return mgl_get_fit_chi(); }
/// Get covariance matrix for last fitted formula
static inline mglData GetFitCovar()
{ return mglData(mgl_get_fit_covar()); }
/// Solve PDE with x,y,z in range axis range
inline mglData PDE(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglData(true,mgl_pde_solve(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in range axis range
inline mglDataC PDEc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglDataC(true,mgl_pde_solve_c(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in range axis range using advanced (slow!!!) method (2d only)
inline mglData APDE(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglData(true,mgl_pde_adv(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in range axis range using advanced (slow!!!) method (2d only)
inline mglDataC APDEc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglDataC(true,mgl_pde_adv_c(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Fill data by formula with x,y,z in range axis range
inline void Fill(mglData &u, const char *eq, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, 0, 0, opt); }
inline void Fill(mglData &u, const char *eq, const mglDataA &v, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, &v, 0, opt); }
inline void Fill(mglData &u, const char *eq, const mglDataA &v, const mglDataA &w, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, &v, &w, opt); }
/// Fill data by formula with x,y,z in range axis range
inline void Fill(mglDataC &u, const char *eq, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, 0, 0, opt); }
inline void Fill(mglDataC &u, const char *eq, const mglDataA &v, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, &v, 0, opt); }
inline void Fill(mglDataC &u, const char *eq, const mglDataA &v, const mglDataA &w, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, &v, &w, opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat for x in axis range
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,0,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat for x,y in axis range
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,&ydat,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in axis range
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,&ydat,&zdat,&vdat,-1,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat for x in axis range
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,0,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat for x,y in axis range
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,&ydat,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in axis range
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,&ydat,&zdat,&vdat,-1,opt); }
/// Set the data by triangulated surface values assuming x,y,z in range axis range
inline void DataGrid(mglData &d, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *opt="")
{ mgl_data_grid(gr,&d,&x,&y,&z,opt); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_x(gr, &x, &a, opt)); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &y, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_xy(gr, &x, &y, &a, opt)); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_xyz(gr, &x, &y, &z, &a, opt)); }
inline void Compression(bool){}	// NOTE: Add later -- IDTF (no-op placeholder)
/// Set the preference for vertex color on/off (for formats that support it, now only PRC does).
inline void VertexColor(bool enable)	{	mgl_set_flag(gr,enable, MGL_PREFERVC);	}
/// Render only front side of surfaces for debugging purposes (for formats that support it, now only PRC does).
/// Note: the flag stored is the inverse of the argument (MGL_ONESIDED set when double-siding is disabled).
inline void DoubleSided(bool enable)	{	mgl_set_flag(gr,!enable, MGL_ONESIDED);	}
//	inline void TextureColor(bool){}	// NOTE: Add later -- IDTF
};
//-----------------------------------------------------------------------------
/// Wrapper class for MGL script parsing: a reference-counted RAII handle
/// around the C-API parser object HMPR (mgl_use_parser tracks the count).
class MGL_EXPORT mglParse
{
	HMPR pr;	// underlying parser handle, shared between copies
public:
	mglParse(HMPR p)	{	pr = p;		mgl_use_parser(pr,1);	}
	// NOTE(review): copy constructor takes a non-const reference, so it cannot
	// bind to const or temporary objects -- confirm before changing the signature.
	mglParse(mglParse &p)	{	pr = p.pr;	mgl_use_parser(pr,1);	}
	mglParse(bool setsize=false)
	{	pr=mgl_create_parser();	mgl_parser_allow_setsize(pr, setsize);	}
	virtual ~mglParse()
	{
		// Decrement the use count under a critical section; the last owner
		// (count drops below 1) destroys the parser.
#pragma omp critical
		if(mgl_use_parser(pr,-1)<1)	mgl_delete_parser(pr);
	}
	/// Get pointer to internal mglParser object
	inline HMPR Self()	{	return pr;	}
	/// Parse and draw single line of the MGL script
	inline int Parse(mglGraph *gr, const char *str, int pos)
	{	return mgl_parse_line(gr->Self(), pr, str, pos);	}
	inline int Parse(mglGraph *gr, const wchar_t *str, int pos)
	{	return mgl_parse_linew(gr->Self(), pr, str, pos);	}
	/// Execute MGL script text with '\n' separated lines
	inline void Execute(mglGraph *gr, const char *str)
	{	mgl_parse_text(gr->Self(), pr, str);	}
	inline void Execute(mglGraph *gr, const wchar_t *str)
	{	mgl_parse_textw(gr->Self(), pr, str);	}
	/// Execute and draw script from the file
	inline void Execute(mglGraph *gr, FILE *fp, bool print=false)
	{	mgl_parse_file(gr->Self(), pr, fp, print);	}
	/// Return type of command: 0 - not found, 1 - other data plot, 2 - func plot,
	/// 3 - setup, 4 - data handle, 5 - data create, 6 - subplot, 7 - program
	/// 8 - 1d plot, 9 - 2d plot, 10 - 3d plot, 11 - dd plot, 12 - vector plot
	/// 13 - axis, 14 - primitives, 15 - axis setup, 16 - text/legend, 17 - data transform
	inline int CmdType(const char *name)
	{	return mgl_parser_cmd_type(pr, name);	}
	/// Return string of command format (command name and its argument[s])
	inline const char *CmdFormat(const char *name)
	{	return mgl_parser_cmd_frmt(pr, name);	}
	/// Return description of MGL command
	inline const char *CmdDesc(const char *name)
	{	return mgl_parser_cmd_desc(pr, name);	}
	/// Get name of command with number n
	inline const char *GetCmdName(long n)
	{	return mgl_parser_cmd_name(pr,n);	}
	/// Get number of defined commands
	inline long GetCmdNum()
	{	return mgl_parser_cmd_num(pr);	}
	/// Load new commands from external dynamic Library (must have "const mglCommand *mgl_cmd_extra" variable)
	inline void LoadDLL(const char *fname)
	{	mgl_parser_load(pr, fname);	}
	/// Apply one step for equation d vars[i]/dt = eqs[i] using Runge-Kutta method
	inline void RK_Step(const char *eqs, const char *vars, mreal dt=1)
	{	mgl_rk_step(pr, eqs, vars, dt);	}
	inline void RK_Step(const wchar_t *eqs, const wchar_t *vars, mreal dt=1)
	{	mgl_rk_step_w(pr, eqs, vars, dt);	}
	/// Set value for parameter $N
	inline void AddParam(int id, const char *str)
	{	mgl_parser_add_param(pr, id, str);	}
	inline void AddParam(int id, const wchar_t *str)
	{	mgl_parser_add_paramw(pr, id, str);	}
	/// Restore once flag
	inline void RestoreOnce()	{	mgl_parser_restore_once(pr);	}
	/// Allow changing size of the picture
	inline void AllowSetSize(bool allow)	{	mgl_parser_allow_setsize(pr, allow);	}
	/// Allow reading/saving files
	inline void AllowFileIO(bool allow)	{	mgl_parser_allow_file_io(pr, allow);	}
	/// Allow loading commands from external libraries
	inline void AllowDllCall(bool allow)	{	mgl_parser_allow_dll_call(pr, allow);	}
	/// Set flag to stop script parsing
	inline void Stop()	{	mgl_parser_stop(pr);	}
	/// Set variant of argument(s) separated by '?' to be used in further commands
	inline void SetVariant(int var=0)
	{	mgl_parser_variant(pr, var);	}
	/// Return result of formula evaluation
	inline mglData Calc(const char *formula)
	{	return mglData(true,mgl_parser_calc(pr,formula));	}
	inline mglData Calc(const wchar_t *formula)
	{	return mglData(true,mgl_parser_calcw(pr,formula));	}
	/// Return result of formula evaluation as complex data
	inline mglDataC CalcComplex(const char *formula)
	{	return mglDataC(true,mgl_parser_calc_complex(pr,formula));	}
	inline mglDataC CalcComplex(const wchar_t *formula)
	{	return mglDataC(true,mgl_parser_calc_complexw(pr,formula));	}
	/// Find variable with given name or add a new one
	/// NOTE !!! You must not delete obtained data arrays !!!
	inline mglDataA *AddVar(const char *name)
	{	return mgl_parser_add_var(pr, name);	}
	inline mglDataA *AddVar(const wchar_t *name)
	{	return mgl_parser_add_varw(pr, name);	}
	/// Find variable with given name or return NULL if no one
	/// NOTE !!! You must not delete obtained data arrays !!!
	inline mglDataA *FindVar(const char *name)
	{	return mgl_parser_find_var(pr, name);	}
	inline mglDataA *FindVar(const wchar_t *name)
	{	return mgl_parser_find_varw(pr, name);	}
	/// Get variable with given id. Can be NULL for temporary ones.
	/// NOTE !!! You must not delete obtained data arrays !!!
	inline mglDataA *GetVar(unsigned long id)
	{	return mgl_parser_get_var(pr,id);	}
	/// Get number of variables
	inline long GetNumVar()
	{	return mgl_parser_num_var(pr);	}
	/// Delete variable with name
	inline void DeleteVar(const char *name)		{	mgl_parser_del_var(pr, name); 	}
	inline void DeleteVar(const wchar_t *name)	{	mgl_parser_del_varw(pr, name); 	}
	/// Delete all data variables
	void DeleteAll()	{	mgl_parser_del_all(pr);	}
};
//-----------------------------------------------------------------------------
#endif
#endif
|
barrier.c | /*
* barrier.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
/*
 * Archer/ThreadSanitizer testcase: two threads increment `var` in turn.
 * The barrier orders thread 0's increment before thread 1's, so no data
 * race may be reported (see the CHECK-NOT lines below); exact structure
 * of this function is part of the test and must not be refactored.
 */
int main(int argc, char *argv[]) {
  int var = 0;
#pragma omp parallel num_threads(2) shared(var)
  {
    if (omp_get_thread_num() == 0) {
      var++;
    }
    // Barrier makes the first increment happen-before the second.
#pragma omp barrier
    if (omp_get_thread_num() == 1) {
      var++;
    }
  }
  fprintf(stderr, "DONE\n");
  // Exit status 0 only when both increments landed.
  int error = (var != 2);
  return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
fileparse.c | /* Copyright (c) 2015 The University of Edinburgh. */
/*
* This software was developed as part of the
* EC FP7 funded project Adept (Project ID: 610490)
* www.adept-project.eu
*/
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <ctype.h>
#include <omp.h>
#include "utils.h"
#include "level1.h"
int create_line(char*, size_t, char*, unsigned int);
int seek_match(char*, size_t, char*, unsigned int);
/*
 * Benchmark: generate a file of num_rows random lines (roughly half of
 * which contain the search phrase), then re-read it and count matching
 * lines inside an OpenMP parallel region, timing only the scan phase.
 * CLOCK and elapsed_time_hr come from utils.h.
 */
void fileparse(unsigned int num_rows){
  char search_phrase[] = "AdeptProject";
  size_t sp_len = strlen(search_phrase);
  unsigned int desired_line_len = 81;
  char line[desired_line_len];
  srand(time(NULL)); // Set seed
  int i = 0;   // NOTE(review): signed i compared against unsigned num_rows below
  int r = 0;
  int m = 0;
  int stop=0, tn;   // tn is declared but never used
  int mismatch = 0; // generation self-check counters; computed but not reported
  int r_count = 0;
  int m_count = 0;
  struct timespec start, end;
  FILE* fp;
  /* Phase 1: write the test file, checking that seek_match agrees with
   * create_line about whether each line contains the phrase. */
  fp = fopen("testfile", "w+");
  for (i=0;i<num_rows;i++){
    r = create_line(search_phrase, sp_len, line, desired_line_len);
    m = seek_match(search_phrase, sp_len, line, desired_line_len);
    if (r!=m){
      mismatch++;
    }
    if (r==0){
      r_count++;
    }
    fprintf(fp, "%s\n", line);
  }
  fsync(fileno(fp));
  fclose(fp);
  m=0;
  /* Phase 2: timed parallel re-scan of the file. */
  clock_gettime(CLOCK, &start);
  fp = fopen("testfile", "r");
  // Threaded version of while loop
  // NOTE(review): the critical section covers the whole if/else, so both the
  // fscanf AND the seek_match execute serialized -- the parallel region adds
  // thread overhead without parallel work. Also `stop` is read in the while
  // condition with no synchronization at the read side (the flush is only on
  // the writer), which is a data race under the OpenMP memory model -- confirm
  // whether this is intentional for the benchmark.
#pragma omp parallel reduction(+:m_count)
  while (!stop){
    // Critical section to evaluate stopping criterion and flush outcome
#pragma omp critical
    if (fscanf(fp, "%s\n", line)==EOF){
      stop=1;
#pragma omp flush(stop)
    }
    else{
      m = seek_match(search_phrase, sp_len, line, desired_line_len);
      if (m==0){
        m_count++;
      }
    }
  }
  fclose(fp);
  clock_gettime(CLOCK, &end);
  elapsed_time_hr(start, end, "Fileparse");
  unlink("testfile"); // Use this to ensure the generated file is removed from the system upon finish
}
/*
* Create a line of random characters
* Line will be ll long and appears in l
* Randomly, phrase contained in sp and of sp_len length will be added to l at a random position
*/
/*
 * Fill l with ll-1 random alphanumeric characters and NUL-terminate it,
 * so the result always fits in a caller buffer of ll bytes.  With
 * probability 1/2 the search phrase sp (sp_len chars, no terminator
 * copied) is overlaid at a random offset.
 *
 * Returns 0 when the phrase was inserted, 1 otherwise.
 *
 * Fix: the original wrote the terminator at l[i+1] after the fill loop
 * left i == ll, i.e. at l[ll+1] -- two bytes past the end of the caller's
 * ll-byte buffer (fileparse passes char line[81] with ll == 81), and the
 * in-buffer string was never NUL-terminated at all.
 */
int create_line(char* sp, size_t sp_len, char* l, unsigned int ll){
  unsigned int i;
  int r;
  int flag = 1; /* 1 = phrase absent, 0 = phrase inserted */

  /* Fill all but the last slot with random alphanumeric characters. */
  for (i = 0; i + 1 < ll; i++){
    do {
      r = rand() % 128;
    } while (!isalnum(r));
    l[i] = (char)r;
  }
  l[i] = '\0'; /* terminator at l[ll-1], inside the buffer */

  /* Half of the time, overlay the phrase at a random position; the
   * offset range keeps the phrase strictly before the terminator. */
  if (rand() % 2 == 0){
    flag = 0;
    r = rand() % (int)(ll - sp_len);
    memcpy(&l[r], sp, sp_len);
  }
  return flag;
}
/*
* Naive matching algorithm
*/
/*
 * Naive substring search: scan the first ll - sp_len positions of l for
 * the phrase sp (sp_len chars).  Returns 0 on a match, 1 otherwise.
 * Note: a phrase ending exactly at l[ll-1] is deliberately not found,
 * matching the generator's placement range.
 */
int seek_match(char* sp, size_t sp_len, char* l, unsigned int ll){
  int result = 1;
  for (size_t pos = 0; pos + sp_len < ll; pos++){
    /* cheap first-character filter before the full comparison */
    if (l[pos] == sp[0] && strncmp(&l[pos], sp, sp_len) == 0){
      result = 0;
      break;
    }
  }
  return result;
}
|
ops.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#define MIN 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#include <helpers/sharedmem.h>
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/being being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#define no_op_exec_special_accumulation_same_cuda
#endif
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
// User-defined OpenMP reductions over nd4j scalar types.
// maxT/minT reduce via nd4j_max/nd4j_min with -/+MAX_FLOAT as the identity;
// sumT is a plain additive reduction (identity 0) and also covers the
// integer and bool types.
#pragma omp declare reduction(maxT : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minT : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#endif
namespace functions {
namespace indexreduce {
// Pairs a value with its flat index; used by index-reduction ops
// (argmax/argmin style) to carry the winning position with the value.
template <typename T>
struct IndexValue {
T value;
Nd4jLong index;
_CUDA_HD IndexValue() = default;
// Initializer list reordered to match member declaration order
// (value, index) — silences -Wreorder; behavior is unchanged.
_CUDA_HD IndexValue(const T val, const Nd4jLong ind): value(val), index(ind) {}
};
}
namespace summarystats {
// Forward declaration only; the full definition lives with the
// summary-statistics ops.
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
// Pairwise addition: Z = X + Y. op_def expands to host/device qualifiers.
template <typename X, typename Y, typename Z>
class Add {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 + d2);
}
// Unary form: identity cast (used when only one operand is streamed).
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 + params[0]);
}
// Additive identity for reductions.
op_def static X startingValue() {
return static_cast<X>(0.f);
}
};
// Two-type addition returning the first operand's type.
template <typename X, typename Y>
class NewAdd {
public:
op_def static X op(X d1, Y d2, X *params) {
return d1 + d2;
}
};
// Pairwise subtraction: Z = X - Y.
template <typename X, typename Y, typename Z>
class Subtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 - d2);
}
// Unary form: identity cast.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 - params[0]);
}
};
// Squared difference: (d1 - d2)^2, computed via nd4j_pow.
template <typename X, typename Y, typename Z>
class SquaredSubtract {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - d2), 2.f);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - d2), 2.f);
}
// Unary form: pass-through (implicit conversion to Z).
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - params[0]), 2.f);
}
};
// Subtraction with operands swapped: Z = d2 - d1.
template <typename X, typename Y, typename Z>
class ReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 - d1);
}
// Unary form: pass-through (implicit conversion to Z).
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] - d1);
}
};
// Full log-Poisson loss: exp(c) - z*c + z*log(z) - z + 0.5*log(2*pi*z),
// i.e. it includes the Stirling-approximation terms that the plain
// LogPoisonLoss below omits.
template <typename X, typename Y, typename Z>
class LogPoisonLossFull {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
// Unary form: only the Stirling terms (no label c available).
op_def static Z op(X z) {
auto zz = static_cast<Z>(z);
// Fixed: was nd4j_log<Y, Z>(z), but z has type X, not Y — use <X, Z>
// exactly as the two-argument overloads above do.
return (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
}
// op for MetaOps
// NOTE(review): this overload returns X while its siblings return Z —
// kept as-is since MetaOp dispatch may depend on the signature; confirm.
op_def static X op(X z, Y *params) {
return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
}
};
// Log-Poisson loss without the Stirling terms: exp(c) - z*c.
template <typename X, typename Y, typename Z>
class LogPoisonLoss {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
// Unary form: identity cast.
op_def static Z op(X z) {
return static_cast<Z>(z);
}
// op for MetaOps
op_def static Z op(X z, Y *params) {
return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
}
};
// Pairwise multiplication: Z = X * Y.
template <typename X, typename Y, typename Z>
class Multiply {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 * d2);
}
// Unary form: identity cast.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 * params[0]);
}
// Multiplicative identity for reductions.
op_def static X startingValue() {
return static_cast<X>(1.f);
}
};
// Pairwise division: Z = X / Y. No zero-divisor guard — see SafeDivide.
template <typename X, typename Y, typename Z>
class Divide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 / d2);
}
// Unary form: identity cast.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 / params[0]);
}
// Identity element for reductions.
op_def static X startingValue() {
return static_cast<X>(1);
}
};
// Division that yields 0 instead of dividing by zero.
template <typename X, typename Y, typename Z>
class SafeDivide {
public:
op_def static Z op(X d1, Y d2) {
return d2 == static_cast<Y>(0) ? static_cast<Z>(0) : static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return d2 == static_cast<Y>(0) ? static_cast<Z>(0) : static_cast<Z>(d1 / d2);
}
// Unary form: identity cast.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps: divisor comes from params[0].
op_def static Z op(X d1, Y *params) {
return params[0] == static_cast<Y>(0) ? static_cast<Z>(0) : static_cast<Z>(d1 / params[0]);
}
};
// Floored division: floor(d1 / d2) computed in the output type Z.
template <typename X, typename Y, typename Z>
class FloorDiv {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
// Unary form: floor of the input alone.
op_def static Z op(X d1) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0]));
}
};
// Truncating division: both operands are cast to int first, so the
// quotient truncates toward zero. NOTE(review): casting to int narrows
// 64-bit inputs and i2 == 0 divides by zero — assumed guarded by callers.
template <typename X, typename Y, typename Z>
class TruncateDiv {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
// Unary form: pass-through (implicit conversion to Z).
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 / i2);
}
};
// Truncating modulo: operands cast to int, remainder has the sign of i1.
// NOTE(review): i2 == 0 is undefined behavior — assumed guarded by callers.
template <typename X, typename Y, typename Z>
class TruncateMod {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
// Unary form: identity cast.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 % i2);
}
};
// IEEE-style remainder, delegated to nd4j_remainder.
template<typename X, typename Y, typename Z>
class Remainder {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
// Unary form: pass-through (implicit conversion to Z).
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]);
}
};
// fmod-style modulo, delegated to nd4j_fmod (result has the sign of d1).
template <typename X, typename Y, typename Z>
class FMod {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
// Unary form: pass-through (implicit conversion to Z).
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]);
}
};
// Floored modulo: result takes the sign of the divisor d2. When the
// operands' signs differ, fmod is shifted by d2 to flip the sign.
template <typename X, typename Y, typename Z>
class FloorMod {
public:
op_def static Z op(X d1, Y d2) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
// Unary form: pass-through (implicit conversion to Z).
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps: delegates to the two-argument overload.
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
// Division with operands swapped: Z = d2 / d1.
template <typename X, typename Y, typename Z>
class ReverseDivide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 / d1);
}
// Unary form: identity cast.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] / d1);
}
};
// Pairwise copy: the binary forms return the SECOND operand (d2),
// the unary/MetaOp forms return d1.
template <typename X, typename Y, typename Z>
class CopyPws {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
// Same-type identity transform: returns its input unchanged.
template <typename X>
class Copy {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
// Three-type copy, same convention as CopyPws: binary forms return d2,
// unary/MetaOp forms return d1.
template <typename X, typename Y, typename Z>
class Copy2 {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
// BLAS-style axpy: alpha * d1 + d2, with alpha taken from params[0].
// The params-less binary form behaves as alpha == 1 (plain addition).
template <typename X, typename Y, typename Z>
class Axpy {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 + d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto alpha = params[0];
return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
}
// Unary form: identity cast.
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
};
// Type-converting assignment: casts the input from X to Z.
template <typename X, typename Z>
class Assign {
public:
no_op_exec_special_any
no_op_exec_special_any_cuda
op_def static Z op(X d1, X *params) {
return static_cast<Z>(d1);
}
};
// Logical AND. With params, both operands are compared against the
// "false" sentinel params[0]; without params, plain boolean AND.
template <typename X, typename Z>
class And {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// NOTE(review): the params-less binary form adds rather than ANDs —
// looks intentional across And/Or/Xor here, but confirm upstream.
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
// Unary form: pass-through.
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps — 119 is a sentinel; this path is not meaningful here.
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
// Logical OR. With params, operands are compared against the "false"
// sentinel params[0]; without params, plain boolean OR.
template <typename X, typename Z>
class Or {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
// Unary form: pass-through.
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps — 119 is a sentinel; this path is not meaningful here.
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
// Logical XOR. With params, exactly one operand must differ from the
// "false" sentinel params[0]; without params, boolean exclusive-or.
template <typename X, typename Z>
class Xor {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
// Unary form: pass-through.
op_def static Z op(X d1) {
return d1;
}
};
// Logical NOT. Unary form negates a boolean input; the three-argument
// form acts as "not equal"; the params-less binary form always yields 0.
template <typename X, typename Z>
class Not {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
// this transform op should run only on boolean input
op_def static Z op(X d1, X *params) {
auto b1 = static_cast<bool>(d1);
return !b1;
}
};
// Logical NAND of the two inputs: 1 unless both are non-zero.
template <typename X, typename Y, typename Z>
class LogicalNot {
public:
op_def static Z op(X d1, Y d2) {
return !((int) d1 && (int) d2);
}
// Fixed: result is now cast to Z (the declared return type); the
// original cast to X and relied on a second implicit conversion.
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(!(static_cast<int>(d1) && static_cast<int>(d2)));
}
// Unary form: pass-through.
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps — 119 is a sentinel; this path is not meaningful here.
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
// Logical XOR via bit arithmetic on int-cast operands:
// (i1 | i2) & ~(i1 & i2).
template <typename X, typename Y, typename Z>
class LogicalXor {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return (i1 | i2) &~ (i1 & i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// Unary form: pass-through.
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps — 119 is a sentinel; this path is not meaningful here.
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
// Logical AND via bitwise & on int-cast operands.
template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) & static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// NOTE(review): unary form takes Y, unlike siblings which take X —
// possibly deliberate for dispatch; confirm before changing.
op_def static Z op(Y d1) {
return d1;
}
// op for MetaOps — 119 is a sentinel; this path is not meaningful here.
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
// Logical OR via bitwise | on int-cast operands.
template <typename X, typename Y, typename Z>
class LogicalOr {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) | static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// Unary form: pass-through.
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps — 119 is a sentinel; this path is not meaningful here.
// Fixed: sentinel now cast to Z (the declared return type); the
// original cast to X, unlike the sibling logical ops.
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
// Integer modulo on int-cast operands (d1 % d2).
template <typename X, typename Y, typename Z>
class Mod {
public:
/*
// just a optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) % static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
// Integer modulo with operands swapped (d2 % d1).
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d2) % static_cast<int>(d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
/**
* Whether 2 elements in an array
* are epsilon equal
*/
// 1 when |d1 - d2| <= MIN (project-defined epsilon threshold), else 0.
template <typename X, typename Z>
class Epsilon {
public:
op_def static Z op(X d1, X d2) {
X diff = d1 - d2;
X absDiff = nd4j::math::nd4j_abs<X>(diff);
if (absDiff <= static_cast<X>(MIN))
return static_cast<Z>(1);
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// Unary form: pass-through.
op_def static Z op(X d1, X *params) {
return d1;
}
};
// Equality comparison: d1 == d2.
template <typename X, typename Z>
class EqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 == d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// Unary form: pass-through.
op_def static Z op(X d1, X *params) {
return d1;
}
};
// Inequality comparison: d1 != d2.
template <typename X, typename Z>
class NotEqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 != d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// Unary form: pass-through.
op_def static Z op(X d1, X *params) {
return d1;
}
};
// Comparison: d1 >= d2.
template <typename X, typename Z>
class GreaterThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 >= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
// Comparison: d1 > d2.
template <typename X, typename Z>
class GreaterThan {
public:
op_def static Z op(X d1, X d2) {
return d1 > d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
// Comparison: d1 < d2.
template <typename X, typename Z>
class LessThan {
public:
op_def static Z op(X d1, X d2) {
return d1 < d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// Unary form: pass-through.
op_def static Z op(X d1, X *params) {
return d1;
}
};
// Comparison: d1 <= d2.
template <typename X, typename Z>
class LessThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 <= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// Unary form: pass-through.
op_def static Z op(X d1, X *params) {
return d1;
}
};
// Absolute value.
template <typename X>
class Abs {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_abs<X>(d1);
}
};
// Round up to the nearest integer.
template <typename X>
class Ceiling {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_ceil<X,X>(d1);
}
};
// Cosine.
template <typename X>
class Cosine {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cos<X,X>(d1);
}
};
// Natural exponential e^d1.
template <typename X>
class Exp {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1);
}
};
// Derivative of HardTanh: 1 inside [-1, 1], 0 outside.
template <typename X>
class HardTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? static_cast<X>(1.f) : static_cast<X>(0.f));
}
};
// Saturating identity: clamps d1 into [-1, 1].
template <typename X>
class HardTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-1) ? static_cast<X>(-1)
     : d1 > static_cast<X>(1)  ? static_cast<X>(1)
     : d1;
}
};
// Round down to the nearest integer.
template <typename X>
class Floor {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_floor<X,X>(d1);
}
};
// Natural logarithm.
template <typename X>
class Log {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(d1);
}
};
// log(1 + d1). NOTE(review): computed as log of the sum, not a true
// log1p, so precision is reduced for |d1| near 0 — confirm acceptable.
template <typename X>
class Log1p {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(1 + d1);
}
};
// Logarithm of d1 in base d2, via change of base: log(d1) / log(d2).
template <typename X, typename Y, typename Z>
class LogX {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ;
}
};
// Clamps non-positive values up to the smallest positive float16,
// to keep subsequent fp16 math (e.g. log) finite.
template <typename X>
class StabilizeFP16 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return static_cast<X>(nd4j::DataTypeUtils::min<float16>());
else return d1;
}
};
// Like StabilizeFP16 but clamps to the minimum of the element type X.
template <typename X>
class StabilizeX {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return nd4j::DataTypeUtils::min<X>();
else return d1;
}
};
// d1 * (1 - d1) — the sigmoid derivative expressed in terms of the
// sigmoid's OUTPUT value.
template <typename X>
class SpecialDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1.f) - d1);
}
};
// Arithmetic negation.
template <typename X>
class Neg {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return -d1;
}
};
// Gauss error function erf(d1).
template <typename X>
class Erf {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erf<X,X>(d1);
}
};
// Complementary error function erfc(d1).
template <typename X>
class Erfc {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_erfc<X,X>(d1);
}
};
// Multiplicative inverse 1 / d1.
template <typename X>
class Reciprocal {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static X op(X d1, X *params) {
return (static_cast<X>(1) / d1);
}
};
// Square via nd4j_pow(d1, 2), with a type-converting result.
template <typename X, typename Z>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
};
// Relative error between d1 and d2, delegated to nd4j_re.
template <typename X, typename Y, typename Z>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_re<X>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// Unary form: no second operand, so the error is defined as 0.
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
// 1 when the relative error of (d1, d2) exceeds the threshold in
// params[0], else 0.
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X threshold = params[0];
return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
// 1 only when BOTH the relative error exceeds thresholdRelative AND the
// absolute difference is >= thresholdAbsolute; otherwise 0.
template <typename X, typename Y, typename Z>
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
// Unary form: d2 and both thresholds come from params[0..2].
op_def static Z op(X d1, X *params) {
X d2 = params[0];
X thresholdRelative = params[1];
X thresholdAbsolute = params[2];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
// Binary form: thresholds come from params[0..1].
op_def static Z op(X d1, Y d2, Z *params) {
X thresholdRelative = params[0];
X thresholdAbsolute = params[1];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
// Power: d1 raised to d2 (or to params[0] in the scalar-param form).
template <typename X, typename Y, typename Z>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
// Unary form: pass-through (implicit conversion to Z).
op_def static Z op(X d1) {
return d1;
}
};
// Derivative of pow w.r.t. the base: d2 * d1^(d2 - 1).
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
// Unary form: pass-through (implicit conversion to Z).
op_def static Z op(X d1) {
return d1;
}
};
// Round to the nearest integer, delegated to nd4j_round.
template <typename X>
class Round {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_round<X,X>(d1);
}
};
// 1 when d1 is NaN, else 0. Used both as a boolean transform and as an
// accumulation (the reduction counts NaN elements by summation).
template <typename X, typename Z>
class IsNan {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
// Fixed: result is cast to Z (the declared return type), matching the
// sibling ops IsInf/IsFinite; the original cast to X instead.
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// e^d1 - 1. NOTE(review): computed as exp then subtract, not a true
// expm1, so precision is reduced for |d1| near 0 — confirm acceptable.
template <typename X>
class Expm1 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
}
};
// 1 when d1 > 0, else 0; the accumulation interface counts positives.
template <typename X, typename Z>
class IsPositive {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 > (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// 1 when d1 is +/- infinity, else 0; accumulation counts infinities.
template <typename X, typename Z>
class IsInf {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// 1 when d1 is NOT finite (inf or NaN), else 0; accumulation counts them.
template <typename X, typename Z>
class IsInfOrNan{
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// 1 when d1 is finite, else 0; accumulation counts finite elements.
template <typename X, typename Z>
class IsFinite {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// Clamp d1 into [params[0], params[1]] (lower, upper bound).
template <typename X>
class ClipByValue {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto lower = params[0];
auto upper = params[1];
return d1 > upper ? upper : (d1 < lower ? lower : d1);
}
};
// LSTM cell-state clipping: clamps d1 into the symmetric range
// [-d2, d2], where d2 is the clipping threshold.
template <typename X, typename Y, typename Z>
class LstmClip {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X _v = (X) d2;
if (d1 > _v)
return _v;
// Fixed: original compared d1 < _v and returned _v, which collapsed
// EVERY input to _v instead of clamping at the lower bound -_v.
else if (d1 < -_v)
return -_v;
else return d1;
}
};
// Swish activation: d1 * sigmoid(d1).
template <typename X>
class Swish {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1);
}
};
// Derivative of Swish in closed form: e^x (x + e^x + 1) / (e^x + 1)^2,
// with e^x obtained via pow(M_E, d1).
template <typename X>
class SwishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
}
};
// log(sigmoid(d1)).
template <typename X>
class LogSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1));
}
};
// Derivative of log-sigmoid: 1 / (e^x + 1), i.e. sigmoid(-d1).
template <typename X>
class LogSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1);
return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
}
};
// Logistic sigmoid.
template <typename X>
class Sigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoid<X, X>(d1);
}
};
// Derivative of the sigmoid, evaluated at the pre-activation d1.
template <typename X>
class SigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
}
};
// Piecewise-linear sigmoid: clamp(0.2 * d1 + 0.5, 0, 1).
template <typename X>
class HardSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
}
};
// Derivative of HardSigmoid: 0.2 inside [-2.5, 2.5], 0 outside.
template <typename X>
class HardSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
}
};
/**
* Scale to be between a min and max
*/
// Maps d1 into [min, max] (params[0], params[1]). Values already in
// range pass through; for the [0, 1] range the input is squashed via a
// sigmoid first, otherwise a floored linear scaling is applied.
template <typename X>
class SetRange {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto min = params[0];
auto max = params[1];
if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
return d1;
if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
}
return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
}
};
// Sine.
template <typename X>
class Sin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sin<X,X>(d1);
}
};
// Square by direct multiplication (same-type variant of Sqr).
template <typename X>
class Square {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1;
}
};
// Square root with a type-converting result.
template <typename X, typename Z>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
// Reciprocal square root: 1 / sqrt(d1).
template <typename X, typename Z>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
// Round to the nearest integer, delegated to nd4j_rint.
template <typename X>
class Rint {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_rint<X,X>(d1);
}
};
// Softplus activation, delegated to nd4j::math::softplus.
template <typename X>
class SoftPlus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::softplus<X, X>(d1);
}
};
// Signum: -1, 0 or +1 according to the sign of d1.
template <typename X>
class Sign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 > static_cast<X>(0))
return static_cast<X>(1);
if (d1 < static_cast<X>(0))
return static_cast<X>(-1);
return static_cast<X>(0);
}
};
// d1 * (1 - d1).
template <typename X>
class TimesOneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1) - d1);
}
};
// Rational polynomial approximation of tanh, scaled by 1.7159 and
// applied to (2/3) * d1.
template <typename X>
class RationalTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// keep 2/3 as runtime variable, to match precision
auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;
auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
return static_cast<X>(1.7159f) * tanh;
}
};
// Derivative of RationalTanh (quotient rule applied to the rational
// approximation; includes the outer 1.7159 * 2/3 scaling).
template <typename X>
class RationalTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;
auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));
auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);
return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
}
};
template <typename X>
class Tanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanh<X, X>(d1);
}
};
template <typename X>
class RectifiedTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
}
};
template <typename X>
class RectifiedTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
}
};
template <typename X>
class ATanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atanh<X,X>(d1);
}
};
template <typename X>
class TanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanhderivative<X,X>(d1);
}
};
// Element-wise cube: x * x * x.
template <typename X>
class Cube {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto squared = d1 * d1;
return squared * d1;
}
};
// Derivative of x^3: 3 * x^2.
template <typename X>
class CubeDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(3) * d1 * d1;
}
};
// Arc cosine.
template <typename X>
class ACos {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acos<X, X>(d1);
}
};
// Inverse hyperbolic sine.
template <typename X>
class ASinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asinh<X, X>(d1);
}
};
// Derivative of asinh: 1 / sqrt(x^2 + 1).
template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};
// Inverse hyperbolic cosine.
template <typename X>
class ACosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acosh<X, X>(d1);
}
};
// Derivative of acosh: 1 / (sqrt(x - 1) * sqrt(x + 1)).
template <typename X>
class ACoshDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
}
};
// Constant one, regardless of input.
template <typename X>
class Ones {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.0f);
}
};
// Softsign activation (delegated to nd4j_softsign).
template <typename X>
class SoftSign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsign<X, X>(d1);
}
};
// Derivative of softsign (delegated to nd4j_softsignderivative).
template <typename X>
class SoftSignDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsignderivative<X,X>(d1);
}
};
// Boolean predicate transform: tests each element against a condition.
// extraParams layout: [0] comparison value, [1] eps tolerance (modes 0/1),
// [2] mode selector (see the switch below).
template <typename X, typename Z>
class MatchConditionBool {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// this op return 1.0 if condition met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
case 2: // less_than
return d1 < compare ? true : false;
case 3: // greater_than
return d1 > compare ? true : false;
case 4: // less_or_equals_than
return d1 <= compare ? true : false;
case 5: // greater_or_equals_than
return d1 >= compare ? true : false;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? true : false;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? true : false;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? true : false;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? true : false;
case 10:
return (d1 == compare) ? true : false;
case 11:
return (d1 != compare) ? true : false;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? true : false;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? true : false;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
default:
printf("Undefined match condition: [%i]\n", mode);
}
// NOTE(review): unknown modes fall through to returning d1 implicitly
// converted to Z — presumably unreachable; confirm callers validate mode.
return d1;
}
};
// Counting reduction twin of MatchConditionBool: op() yields 1 when the
// selected condition holds and 0 otherwise, and partial counts are summed.
// extraParams layout: [0] comparison value, [1] eps, [2] mode selector.
template <typename X, typename Z>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
// this op return 1.0 if condition met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
case 2: // less_than
return d1 < compare ? 1 : 0;
case 3: // greater_than
return d1 > compare ? 1 : 0;
case 4: // less_or_equals_than
return d1 <= compare ? 1 : 0;
case 5: // greater_or_equals_than
return d1 >= compare ? 1 : 0;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
case 10:
return (d1 == compare) ? 1 : 0;
case 11:
return (d1 != compare) ? 1 : 0;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
default:
printf("Undefined match condition: [%i]\n", mode);
}
// NOTE(review): unknown modes return d1 itself rather than a 0/1 count —
// presumably unreachable; confirm callers validate mode.
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// Exponential linear unit (delegated to nd4j_elu).
template <typename X>
class ELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_elu<X,X>(d1);
}
};
// Derivative of ELU (delegated to nd4j_eluderivative).
template <typename X>
class ELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_eluderivative<X,X>(d1);
}
};
// ReLU with a configurable floor d2: returns max(d1, d2) in the output type.
template <typename X, typename Y, typename Z>
class RELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto xt = static_cast<Z>(d1);
auto xf = static_cast<Z>(d2);
return xt < xf ? xf : xt;
}
};
// Logit smoothing: d1 * (1 - d2) + 0.5 * d2, with d2 the smoothing factor.
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
}
};
// ReLU clipped from above at 6 (reuses RELU for the lower bound).
template <typename X, typename Y, typename Z>
class RELU6 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
}
};
// Leaky ReLU with negative-slope d2 (delegated to nd4j_leakyrelu).
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_leakyrelu<X,Z>(d1, d2);
}
};
// Scaled ELU: lambda * x for x > 0, lambda * alpha * (e^x - 1) otherwise.
// SELU_LAMBDA and SELU_ALPHA are file-level constants.
template <typename X>
class SELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
}
};
// Derivative of SELU: lambda for x > 0, lambda * alpha * e^x otherwise.
template <typename X>
class SELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
}
};
// Derivative of leaky ReLU: 1 for x >= 0, otherwise the slope d2.
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
if (d1 >= static_cast<X>(0))
return static_cast<Z>(1);
else
return static_cast<Z>(d2);
}
};
// Arc sine.
template <typename X>
class ASin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asin<X,X>(d1);
}
};
// Hyperbolic sine.
template <typename X>
class Sinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sinh<X,X>(d1);
}
};
// Derivative of sinh, i.e. cosh.
template <typename X>
class SinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X, X>(d1);
}
};
// Hyperbolic cosine.
template <typename X>
class Cosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X,X>(d1);
}
};
// Tangent.
template <typename X>
class Tan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tan<X,X>(d1);
}
};
// Derivative of tan: sec^2(x) = 1 / cos^2(x).
template <typename X>
class TanDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
}
};
// Arc tangent.
template <typename X>
class ATan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atan<X, X>(d1);
}
};
// Two-argument arc tangent. Note the argument order: op(d1, d2) delegates
// to nd4j_atan2(d2, d1), i.e. the operands are swapped at the call site.
template <typename X, typename Y, typename Z>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
// Pass-through: returns the input unchanged.
template <typename X>
class Identity {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
// Clamps d1 so that d1 * params[0] stays within [MIN_CUTFOFF, -MIN_CUTFOFF];
// values already inside the band pass through unchanged.
template <typename X>
class Stabilize {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X scale = params[0];
auto scaled = d1 * scale;
X upper = static_cast<X>(- MIN_CUTFOFF);
X lower = static_cast<X>(MIN_CUTFOFF);
if (scaled > upper)
return upper / scale;
if (scaled < lower)
return lower / scale;
return d1;
}
};
// Step function: 1 if d1 exceeds the threshold d2, else 0.
template <typename X, typename Y, typename Z>
class Step {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
}
};
// 1 - x.
template <typename X>
class OneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1) - d1;
}
};
// Plain sum reduction: op is the identity and partials combine by addition.
template <typename X>
class Sum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// Shannon entropy over squared values: -sum(x^2 * log(x^2)).
template <typename X, typename Z>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)) * nd4j::math::nd4j_log<X, Z>(nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2.0f)));
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return -reduction;
}
};
// Log-entropy: log(-sum(x * log(x))).
template <typename X, typename Z>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
//entropy is -sum(p(x) * log(p(x))); log entropy is log of this
return nd4j::math::nd4j_log<X, Z>(-reduction);
}
};
// Entropy: -sum(x * log(x)).
template <typename X, typename Z>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
}
};
// Sum of absolute values (each combination step re-applies abs defensively).
template <typename X>
class ASum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
// Counts elements that are not exactly zero; Z is the (long) count type.
template <typename X, typename Z>
class CountNonZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// Counts elements that are exactly zero; Z is the (long) count type.
template <typename X, typename Z>
class CountZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
// 1 for a zero element, 0 otherwise. Cast directly to the declared
// return type Z (matching CountNonZero); the original cast to X and
// relied on an extra implicit X -> Z conversion.
return d1 == static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return static_cast<Z>(reduction);
}
};
// Product reduction; the neutral starting element is 1.
template <typename X>
class Prod {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// "Any": sums the inputs and reports 1 if the total is positive, else 0.
template <typename X, typename Z>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0) ;
}
};
// "All": multiplies the inputs and reports 1 if the product is positive.
// NOTE(review): the positivity test assumes non-negative truth-like inputs
// (two negatives would also give a positive product) — confirm the contract.
template <typename X, typename Z>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
// Arithmetic mean: sum divided by the element count in postProcess.
template <typename X, typename Z>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return (Z) reduction / (Z) n;
}
};
// Mean of absolute values.
// NOTE(review): update() is declared to return X while merge() returns Z —
// looks like an inconsistency; confirm against the reduction framework.
template <typename X, typename Z>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction) / static_cast<X>(n);
}
};
// Max reduction; seeded with the first element so it works for all-negative
// inputs.
template <typename X>
class Max {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// Pairwise op returning the operand with the larger absolute value
// (the signed value itself, not its magnitude).
template <typename X, typename Y, typename Z>
class AMaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (nd4j::math::nd4j_abs<Z>(z1) > nd4j::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
// Pairwise op returning the operand with the smaller absolute value.
template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
if (nd4j::math::nd4j_abs<Z>(z1) < nd4j::math::nd4j_abs<Z>(z2))
return z1;
else
return z2;
}
};
// Pairwise maximum computed in the output type Z.
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
// Pairwise minimum computed in the output type Z.
template <typename X, typename Y, typename Z>
class MinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
// Maximum of absolute values; seeded with the first element.
// NOTE(review): the two-argument op(d1, d2) returns the signed winner while
// the combiner steps return magnitudes — confirm which path callers use.
template <typename X>
class AMax {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
// Minimum of absolute values; seeded with the first element.
template <typename X>
class AMin {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
// Min reduction; seeded with the first element.
template <typename X>
class Min {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// L1 norm: sum of absolute values.
template <typename X, typename Z>
class Norm1 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1));
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(reduction);
}
};
// L2 norm: square root of the sum of squares.
template <typename X, typename Z>
class Norm2 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<X, Z>(reduction);
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
};
// Sum of squares (L2 norm without the final square root).
template <typename X, typename Z>
class SquaredNorm {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(reduction);
}
};
// Frobenius norm: sqrt of the sum of squared magnitudes.
template <typename X, typename Z>
class NormFrobenius {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
X v = nd4j::math::nd4j_abs<X>(d1);
return static_cast<Z>(v * v);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<X, Z>(reduction);
}
};
// General p-norm: (sum |x|^p)^(1/p); the exponent p is extraParams[0].
template <typename X, typename Z>
class NormP {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_pow<X, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
}
};
// Max norm (infinity norm): maximum absolute value.
// NOTE(review): merge() sums partials while update() takes the max of
// magnitudes — the asymmetry is preserved as-is; confirm against how the
// reduction framework uses merge vs update.
template <typename X, typename Z>
class NormMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old),
nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// The original computed max(|reduction|, |reduction|) — a duplicated
// argument; max of a value with itself is the value, so the intended
// result is simply the absolute value of the accumulated maximum.
return static_cast<Z>(nd4j::math::nd4j_abs<X>(reduction));
}
};
// Sample variance: accumulates squared deviations from the pre-computed mean
// (supplied in extraParams[0]) and divides by (n - 1), i.e. Bessel's
// correction. NOTE(review): n == 1 divides by zero — presumably callers
// guarantee n > 1; confirm.
template <typename X, typename Z>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static X op(X d1, Z *extraParams) {
X mean = static_cast<X>(extraParams[0]);
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// T bias = extraParams[1];
// return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
}
};
/**
* Standard deviation of a buffer
*/
// Accumulates squared deviations from the pre-computed mean (extraParams[0])
// and takes the square root of the sample variance in postProcess.
template <typename X, typename Z>
class StandardDeviation {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z op(X d1, Z *extraParams) {
// Explicit cast from the Z-typed extra-params slot, matching
// Variance<X,Z>::op (the original relied on an implicit conversion).
X mean = static_cast<X>(extraParams[0]);
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// sqrt of the Bessel-corrected variance; see Variance::postProcess.
Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
Z sqrtRet = nd4j::math::nd4j_sqrt<X, Z>(ret);
return sqrtRet;
}
};
// Cosine similarity: dot(a, b) / (||a|| * ||b||). The running dot product is
// the reduction value; extraParams[0] and extraParams[1] accumulate the
// squared norms of the two inputs.
template <typename X, typename Y>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
// Side effect: accumulate squared norms; return the dot contribution.
extraParams[0] += static_cast<Y>(d1 * d1);
extraParams[1] += static_cast<Y>(d2 * d2);
return static_cast<Y>(d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
// Thread-safe variant for CUDA: norm accumulation via atomic adds.
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
return static_cast<Y>(d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
// Jaccard distance: 1 - sum(min(a_i, b_i)) / sum(max(a_i, b_i)).
// extraParams[0] accumulates the numerator, extraParams[1] the denominator;
// the reduction value itself is unused (op always returns 0).
template <typename X, typename Y>
class JaccardDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
// Cast to Y, the declared return type (the original cast to X and
// relied on a second implicit conversion; CosineSimilarity casts to
// its result type directly).
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
// num / denom
return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
}
// Numerator contribution: element-wise minimum.
op_def static Y num(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
// Denominator contribution: element-wise maximum.
op_def static Y denom(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(num(d1, d2));
extraParams[1] += static_cast<Y>(denom(d1, d2));
return static_cast<Y>(0.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
// Thread-safe variant for CUDA: accumulate via atomic adds.
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
return static_cast<Y>(0.0f);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
// Normalized Hamming distance: each mismatching pair contributes 1 and
// postProcess divides the mismatch count by n.
template <typename X, typename Y>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return static_cast<Y>(reduction / n);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
// Exact equality test — no eps tolerance here.
return (d1 == d2) ? static_cast<Y>(0.0f) : static_cast<Y>(1.0f);
}
op_def static void aggregateExtraParams(X *extraParamsTotal, X *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class CosineDistance {
public:
    // Cosine distance: 1 - dot(a, b) / (||a|| * ||b||).
    // extraParams[0] accumulates sum(|a_i|^2), extraParams[1] sum(|b_i|^2);
    // the reduction itself accumulates the dot product.
    static const int extraParamsLen = 2;
    op_def static X *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }
    op_def static void finalizeExtraParams(X *extraParams) {
        //delete[] extraParams;
    }
    op_def static Y startingValue(X *input) {
        return static_cast<Y>(0.0f);
    }
    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
        return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])));
    }
    op_def static Y op(X d1, X d2, Y *extraParams) {
        extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1));
        extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2));
        return (d1 * d2);
    }
    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }
#ifdef __CUDACC__
    static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
        // BUG FIX: was nd4j_abs<Y>(d1) although d1/d2 are of type X; mirror
        // op() — take |.| in X and cast the squared magnitude to Y before the
        // atomic add, so CUDA and host paths accumulate identical values.
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1)));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2)));
        return (d1 * d2);
    }
#endif
    op_def static Y update(Y old, Y opOutput, Y *extraParams) {
        return old + opOutput;
    }
    op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
        return update(old, opOutput, extraParams);
    }
};
/**
* Dot product between 2 arrays
*/
template <typename X, typename Y>
class Dot {
public:
    // Plain dot product: multiply element pairs, sum the products.
    // No extra parameters are required.
    static const int extraParamsLen = 0;

    op_def static X * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *params) {
        // nothing was allocated, nothing to release
    }

    op_def static Y startingValue(X *input) {
        return static_cast<Y>(0.0f);
    }

    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *params) {
        // the running sum already is the final answer
        return reduction;
    }

    op_def static Y op(X d1, X d2, Y *params) {
        return static_cast<Y>(d1 * d2);
    }

#ifdef __CUDACC__
    __device__
    static inline Y opAtomic(X d1, X d2, Y *params) {
        return op(d1, d2, params);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *params) {
        return opOutput + old;
    }

    op_def static Y merge(Y old, Y opOutput, Y *params) {
        return update(old, opOutput, params);
    }

    op_def static void aggregateExtraParams(Y *total, Y *local) {
        // no extra params to combine
    }
};
/**
* Op to check equality within arrays
*/
template <typename X, typename Z>
class EqualsWithEps {
public:
    // Tolerant pairwise equality: op() returns 0 when d1 ~= d2 within eps and
    // 1 otherwise, so the reduction counts mismatching element pairs.
    static const int extraParamsLen = 0;
    op_def static X * generateExtraParams() {
        return nullptr;
    }
    op_def static void finalizeExtraParams(X *extraParamsRef) {
        //no-op
    }
    op_def static Z startingValue(X *input) {
        return static_cast<Z>(0.0f);
    }
    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) {
        return reduction;
    }
    op_def static Z op(X d1, X d2, Z *extraParamsRef) {
        // two infinities compare equal iff they have the same sign
        if (nd4j::math::nd4j_isinf<X>(d1) && nd4j::math::nd4j_isinf<X>(d2)) {
            if (d1 > 0 && d2 > 0)
                return static_cast<Z>(0.f);
            else if (d1 < 0 && d2 < 0)
                return static_cast<Z>(0.f);
            else
                return static_cast<Z>(1.f);
        }
        // NOTE(review): eps is read from extraParamsRef[2] although
        // extraParamsLen is 0 — the caller must supply that buffer; verify.
        Z eps = nd4j::math::nd4j_abs<Z>(extraParamsRef[2]);
        Z diff = static_cast<Z>(nd4j::math::nd4j_abs<X>(d1 - d2));
        // absolute tolerance:
        // works well except in the range of very large numbers
        if (diff <= eps)
            return static_cast<Z>(0.f);
        // Knuth approach (relative tolerance):
        // works well except in the range of very small numbers
        if (diff <= nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(static_cast<Z>(d1)), nd4j::math::nd4j_abs<Z>(static_cast<Z>(d2))) * eps)
            return static_cast<Z>(0.f);
        return static_cast<Z>(1.f);
    }
#ifdef __CUDACC__
    __device__
    static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif
    op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) {
        return opOutput + old;
    }
    // BUG FIX: `old` was typed X although merge forwards it to update(), which
    // takes the accumulator type Z (as every sibling reduce3 op does).
    op_def static Z merge(Z old, Z opOutput, Z *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }
    op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {}
};
template <typename X, typename Y>
class EuclideanDistance {
public:
    // Euclidean (L2) distance: accumulate squared differences, then take the
    // square root of the total in postProcess().
    static const int extraParamsLen = 0;

    op_def static X * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *params) {
        // nothing allocated
    }

    op_def static Y startingValue(X *input) {
        return static_cast<Y>(0.0f);
    }

    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *params) {
        return nd4j::math::nd4j_sqrt<Y, Y>(reduction);
    }

    op_def static Y op(X d1, X d2, Y *params) {
        X diff = d1 - d2;
        return static_cast<Y>(diff * diff);
    }

#ifdef __CUDACC__
    __device__
    static inline Y opAtomic(X d1, X d2, Y *params) {
        return op(d1, d2, params);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *params) {
        return opOutput + old;
    }

    op_def static Y merge(Y old, Y opOutput, Y *params) {
        return update(old, opOutput, params);
    }

    op_def static void aggregateExtraParams(Y *total, Y *local) {
        // no extra params to combine
    }
};
template <typename X, typename Y>
class ManhattanDistance {
public:
    // Manhattan (L1) distance: sum of absolute element-wise differences.
    static const int extraParamsLen = 0;
    op_def static X * generateExtraParams() {
        return nullptr;
    }
    op_def static void finalizeExtraParams(X *extraParamsRef) {
        //no-op
    }
    op_def static Y startingValue(X *input) {
        return static_cast<Y>(0.0f);
    }
    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
        return reduction;
    }
    op_def static Y op(X d1, X d2, Y *extraParamsRef) {
        return nd4j::math::nd4j_abs<X>(d1 - d2);
    }
    op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
        return old + opOutput;
    }
    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
        // no extra params to combine
    }
#ifdef __CUDACC__
    __device__
    static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif
#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
    // BUG FIX: merge() was declared with X-typed accumulators but forwards to
    // update(), which takes Y; use Y like every sibling reduce3 op.
    op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }
};
template <typename X>
class IndexAbsoluteMax {
public:
    // Index-reduce op: tracks the index of the element with the largest
    // absolute value.

    // NOTE(review): this overload hands an IndexValue<X> to nd4j_abs<X>, which
    // elsewhere takes a plain X — presumably an implicit conversion exists;
    // verify against the nd4j::math headers.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(val);
    }
    // Pairwise combine: keep whichever candidate has the larger |value|.
    // Mutates both arguments in place (their .value becomes |value|).
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
        opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
        old.value = nd4j::math::nd4j_abs<X>(old.value);
        if (opOutput.value > old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }
    // NOTE(review): returns f2 when |f1.value| > |f2.value| — looks inverted
    // relative to update(); preserved as-is, confirm upstream before changing.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value))
            return f2;
        return f1;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }
    static _CUDA_HD inline X startingValue(X *input) {
        // 0 is the neutral element for an absolute-value maximum
        return 0;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
};
template <typename X>
class FirstIndex {
public:
    // Index-reduce op: the smallest index whose value satisfies
    // MatchCondition (condition code/threshold arrive via extraParams).
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }
    static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
        // on CUDA a negative index marks an uninitialized candidate
        if (opOutput.index < 0)
            return old;
#endif
        auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
        //printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
        if (res == static_cast<X>(0))
            return old;        // candidate does not match the condition
        if (old.index < 0)
            return opOutput;   // first match seen so far (-1 == no match yet)
        if (old.index > opOutput.index)
            return opOutput;   // keep the earlier (smaller) index
        return old;
    }
    static _CUDA_HD inline X startingValue(X *input) {
        return -nd4j::DataTypeUtils::max<X>();
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = -1;      // sentinel: nothing matched yet
        return local;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        // prefer the earlier index
        if (f1.index > f2.index)
            return f2;
        return f1;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }
};
template <typename X>
class LastIndex {
public:
    // Index-reduce op: the largest index whose value satisfies
    // MatchCondition (condition code/threshold arrive via extraParams).
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }
    static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
        // on CUDA a negative index marks an uninitialized candidate
        if (opOutput.index < 0)
            return old;
#endif
        auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
        if (res == static_cast<X>(0))
            return old;        // candidate does not match the condition
        if (old.index < 0)
            return opOutput;   // first match seen so far (-1 == no match yet)
        if (old.index < opOutput.index)
            return opOutput;   // keep the later (larger) index
        return old;
    }
    static _CUDA_HD inline X startingValue(X *input) {
        return -nd4j::DataTypeUtils::max<X>();
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = -1;      // sentinel: nothing matched yet
        return local;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        // prefer the later index
        if (f1.index < f2.index)
            return f2;
        return f1;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }
};
template <typename X>
class IndexMax {
public:
    // Index-reduce op: argmax — index of the largest value.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }
    static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
        if (opOutput.value > old.value) {
            return opOutput;
        }
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }
    // NOTE(review): returns f2 when f1.value > f2.value — opposite order from
    // update(); preserved as-is (matches the sibling index ops), verify upstream.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (f1.value > f2.value)
            return f2;
        return f1;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }
    static _CUDA_HD inline X startingValue(X *input) {
        // most negative representable value so any element beats it
        return -nd4j::DataTypeUtils::max<X>();
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
};
template <typename X>
class IndexAbsoluteMin {
public:
    // Index-reduce op: index of the element with the smallest absolute value.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
            functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }
    static _CUDA_HD inline X startingValue(X *input) {
        // largest representable value so any element beats it
        return nd4j::DataTypeUtils::max<X>();
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }
    // Pairwise combine: keep whichever candidate has the smaller |value|.
    // Mutates both arguments in place (their .value becomes |value|).
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
        opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
        old.value = nd4j::math::nd4j_abs<X>(old.value);
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }
    // NOTE(review): returns f2 when |f1.value| < |f2.value| — opposite order
    // from update(); preserved as-is, verify upstream before changing.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value))
            return f2;
        return f1;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
};
template <typename X>
class IndexMin {
public:
    // Index-reduce op: argmin — index of the smallest value.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
            functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }
    static _CUDA_HD inline X startingValue(X *input) {
        // largest representable value so any element beats it
        return nd4j::DataTypeUtils::max<X>();
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }
    // NOTE(review): returns f2 when f1.value < f2.value — opposite order from
    // update(); preserved as-is (matches the sibling index ops), verify upstream.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (f1.value < f2.value)
            return f2;
        return f1;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
};
template <typename X, typename Z>
class SummaryStatsVariance {
public:
    // Returns the (optionally bias-corrected) variance carried by val.
    static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
        if (!biasCorrected)
            return static_cast<Z>(val.variance());
        Z corrected = static_cast<Z>(val.varianceBiasCorrected());
        // a negative corrected estimate is numerically invalid — fall back
        if (corrected < static_cast<Z>(0.0f))
            return static_cast<Z>(val.variance());
        return corrected;
    }
    static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
        return d1;
    }
};
template <typename X, typename Z>
class SummaryStatsStandardDeviation {
public:
    // Returns the (optionally bias-corrected) standard deviation carried by
    // val, falling back to the plain variance when the corrected estimate is
    // negative.
    static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
        if (!biasCorrected)
            return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
        auto corrected = static_cast<Z>(val.varianceBiasCorrected());
        if (corrected < static_cast<Z>(0.0f))
            return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
        return nd4j::math::nd4j_sqrt<double, Z>(corrected);
    }
    static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
        return d1;
    }
};
template <typename X>
class DropOut {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    // Dropout: zeroes d1 with probability params[0], passes it through
    // otherwise. On CUDA the "random" draw is derived from clock64() and the
    // thread id; on host it uses rand().
    inline _CUDA_D static X op(X d1, X *params) {
        X prob = params[0];
#ifdef __CUDACC__
        X length = params[1];
        X tid = gridDim.x * blockDim.x + threadIdx.x;
        X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
        // BUG FIX: rand() / RAND_MAX was integer division (almost always 0,
        // which made the dropout mask nearly all-zero for any prob > 0);
        // cast before dividing to get a uniform value in [0, 1].
        X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX);
#endif
        return rnd >= prob ? static_cast<X>(0.0f) : d1;
    }
};
template <typename X, typename Y, typename Z>
class DropOutInverted {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    // Inverted dropout: zeroes d1 with probability d2 and scales survivors by
    // 1/d2 so the expected value of the output is preserved.
#ifdef __CUDACC__
    __device__
#endif
    inline static Z op(X d1, Y d2, Z *params) {
        Y prob = d2;
#ifdef __CUDACC__
        X length = params[1];
        X tid = gridDim.x * blockDim.x + threadIdx.x;
        X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
        // BUG FIX: rand() / RAND_MAX was integer division (almost always 0);
        // cast before dividing to get a uniform value in [0, 1].
        X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX);
#endif
        // BUG FIX: reinterpret_cast cannot convert between arithmetic types
        // (ill-formed); static_cast performs the intended value conversion.
        return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : static_cast<Z>(d1 / static_cast<X>(prob));
    }
};
template <typename X, typename Y, typename Z>
class ReplaceNans {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    // If d1 is NaN, substitute d2; otherwise pass d1 through.
    op_def static Z op(X d1, Y d2, Z *params) {
        if (nd4j::math::nd4j_isnan(d1))
            return static_cast<Z>(d2);
        return static_cast<Z>(d1);
    }
};
// this op is used for conditional pairwise transforms only
template <typename X, typename Y, typename Z>
class CompareAndReplace{
public:
    // op definition for PairWise Transform: returns zd2 when the predicate
    // selected by params[3] holds for zd1 (against params[0], with tolerance
    // params[2]); otherwise keeps zd1.
    op_def static Z op(X d1, Y d2, Z *params) {
        auto zd1 = static_cast<Z>(d1);
        auto zd2 = static_cast<Z>(d2);
        auto compare = params[0];
        auto eps = params[2];
        int mode = (int) params[3];
        switch (mode) {
            case 0:  // equals (within eps)
                return nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps ? zd2 : zd1;
            case 1:  // not equals (within eps)
                return nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps ? zd2 : zd1;
            case 2:  // less_than
                return zd1 < compare ? zd2 : zd1;
            case 3:  // greater_than
                return zd1 > compare ? zd2 : zd1;
            case 4:  // less_or_equals_than
                return zd1 <= compare ? zd2 : zd1;
            case 5:  // greater_or_equals_than
                return zd1 >= compare ? zd2 : zd1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<Z>(zd1) < compare ? zd2 : zd1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<Z>(zd1) > compare ? zd2 : zd1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(zd1) ? zd2 : zd1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(zd1) ? zd2 : zd1;
            case 10: // exact equals
                return zd1 == compare ? zd2 : zd1;
            case 11: // exact not equals
                return zd1 != compare ? zd2 : zd1;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<Z>(zd1) >= compare ? zd2 : zd1;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<Z>(zd1) <= compare ? zd2 : zd1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return zd1;
        }
    }
};
template <typename X, typename Y, typename Z>
class CompareAndSet {
public:
    // op definition for PairWise Transform: returns d2 when the predicate
    // selected by params[3] holds (tested against params[0], with tolerance
    // params[2]); otherwise keeps d1.
    op_def static Z op(X dX, Y dY, Z *params) {
        auto d1 = static_cast<Z>(dX);
        auto d2 = static_cast<Z>(dY);
        auto compare = params[0];
        auto eps = params[2];
        auto mode = static_cast<int>(params[3]);
        switch (mode) {
            case 0:  // equals (within eps)
                return nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps ? d2 : d1;
            case 1:  // not equals (within eps)
                return nd4j::math::nd4j_abs<Z>(d2 - compare) > eps ? d2 : d1;
            case 2:  // less_than
                return d2 < compare ? d2 : d1;
            case 3:  // greater_than
                return d2 > compare ? d2 : d1;
            case 4:  // less_or_equals_than
                return d2 <= compare ? d2 : d1;
            case 5:  // greater_or_equals_than
                return d2 >= compare ? d2 : d1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<Z>(d2) < compare ? d2 : d1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<Z>(d2) > compare ? d2 : d1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(d2) ? d2 : d1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(d2) ? d2 : d1;
            case 10: // exact equals
                return d2 == compare ? d2 : d1;
            case 11: // exact not equals
                return d2 != compare ? d2 : d1;
            // NOTE(review): modes 12/13 test d1 (not d2) in the original —
            // preserved exactly as-is; confirm against upstream before "fixing".
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<Z>(d1) >= compare ? d2 : d1;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<Z>(d1) <= compare ? d2 : d1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return d1;
        }
    }
};
template <typename X>
class CompareAndSetTransform {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    // op definition for Transform: replaces d1 with params[1] ("set") when the
    // predicate chosen by params[3] matches d1 against params[0] ("compare"),
    // with tolerance params[2] for the eps-based modes.
    op_def static X op(X d1, X *params) {
        auto compare = params[0];
        auto set = params[1];
        auto eps = params[2];
        // with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise
        int mode = (int) params[3];
        switch (mode) {
            case 0:  // equals (within eps)
                return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? set : d1;
            case 1:  // not equals (within eps)
                return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? set : d1;
            case 2:  // less_than
                return d1 < compare ? set : d1;
            case 3:  // greater_than
                return d1 > compare ? set : d1;
            case 4:  // less_or_equals_than
                return d1 <= compare ? set : d1;
            case 5:  // greater_or_equals_than
                return d1 >= compare ? set : d1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<X>(d1) < compare ? set : d1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<X>(d1) > compare ? set : d1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(d1) ? set : d1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(d1) ? set : d1;
            case 10: // exact equals
                return d1 == compare ? set : d1;
            case 11: // exact not equals
                return d1 != compare ? set : d1;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) >= compare ? set : d1;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) <= compare ? set : d1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return d1;
        }
    }
};
}
#endif
|
gsrb.c | /*******************************************************************************
Copyright (c) 2016 Advanced Micro Devices, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#if defined(GSRB_FP)
#warning Overriding default GSRB implementation and using pre-computed 1.0/0.0 FP array for Red-Black to facilitate vectorization...
#elif defined(GSRB_STRIDE2)
#if defined(GSRB_OOP)
#warning Overriding default GSRB implementation and using out-of-place and stride-2 accesses to minimize the number of flops
#else
#warning Overriding default GSRB implementation and using stride-2 accesses to minimize the number of flops
#endif
#elif defined(GSRB_BRANCH)
#if defined(GSRB_OOP)
#warning Overriding default GSRB implementation and using out-of-place implementation with an if-then-else on loop indices...
#else
#warning Overriding default GSRB implementation and using if-then-else on loop indices...
#endif
#else
#define GSRB_STRIDE2 // default implementation
#endif
//------------------------------------------------------------------------------------------------------------------------------
// GSRB (Gauss-Seidel Red-Black) smoother: applies 2*NUM_SMOOTHS sweeps to
// vector x_id using right-hand side rhs_id. Work is offloaded to the GPU via
// OpenMP target regions when the block count reaches GPU_THRESHOLD and offload
// is enabled. Parameters a and b are not referenced directly in this body —
// presumably consumed by the apply_op_ijk/Dinv_ijk macros; verify.
void smooth(level_type * level, int x_id, int rhs_id, double a, double b){
int block,s;
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
// keep block/box metadata resident on the device for the whole smooth
#pragma omp target data map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes])\
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE && GPU_ENABLE_SMOOTHER)
{
for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps per GSRB smooth
// exchange the ghost zone...
#ifdef GSRB_OOP // out-of-place GSRB ping pongs between x and VECTOR_TEMP
if((s&1)==0){
exchange_boundary(level,x_id,stencil_get_shape());
apply_BCs(level,x_id,stencil_get_shape());
}
else{
exchange_boundary(level,VECTOR_TEMP,stencil_get_shape());
apply_BCs(level,VECTOR_TEMP,stencil_get_shape());
}
#else // in-place GSRB only operates on x
exchange_boundary(level,x_id,stencil_get_shape());
apply_BCs(level,x_id,stencil_get_shape());
#endif
// apply the smoother...
double _timeStart = getTime();
double * RedBlack_FP = level->RedBlack_FP;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
double h = level->h;
int box_ghosts = level->box_ghosts;
// source/destination vector ids alternate each sweep (ping-pong)
int xn_base_for_this_s = ((s&1) == 0) ? x_id : VECTOR_TEMP;
int xnp1_base_for_this_s = ((s&1) == 0) ? VECTOR_TEMP : x_id;
int size = level->num_my_boxes * level->box_volume;
int index_xn = xn_base_for_this_s *num_my_boxes*box_volume;
int index_xnp1 = xnp1_base_for_this_s *num_my_boxes*box_volume;
// aliases of vector_base so the map clauses below can describe just the
// xn / xnp1 slices of the packed vector storage
double *vector_xn = vector_base;
double *vector_xnp = vector_base;
#pragma omp target\
map(to:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
map(to:vector_xn[index_xn:index_xn + size]) \
map(from:vector_xnp[index_xnp1:index_xnp1 + size]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE && GPU_ENABLE_SMOOTHER)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, box_volume, h, \
box_ghosts, rhs_id, x_id, s)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
const int ilo = my_blocks[block].read.i;
const int jlo = my_blocks[block].read.j;
const int klo = my_blocks[block].read.k;
const int ihi = my_blocks[block].dim.i + ilo;
const int jhi = my_blocks[block].dim.j + jlo;
const int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const double h2inv = 1.0/(h*h);
const int ghosts = box_ghosts;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int color000 = (my_boxes[box].low.i^my_boxes[box].low.j^my_boxes[box].low.k^s)&1;
// is element 000 red or black on *THIS* sweep
// per-box base pointers into the packed vector storage;
// index [0] is the first non-ghost-zone point of this box
const double * __restrict__ rhs = &vector_base[rhs_id*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
const double * __restrict__ alpha = &vector_base[VECTOR_ALPHA*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
const double * __restrict__ beta_i = &vector_base[VECTOR_BETA_I*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
const double * __restrict__ beta_j = &vector_base[VECTOR_BETA_J*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
const double * __restrict__ beta_k = &vector_base[VECTOR_BETA_K*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
const double * __restrict__ Dinv = &vector_base[VECTOR_DINV*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#ifdef GSRB_OOP
const double * __restrict__ x_n;
double * __restrict__ x_np1;
if((s&1)==0){
x_n = &vector_xn[x_id*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
x_np1 = &vector_xnp[VECTOR_TEMP*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
}
else{
x_n = &vector_xn[VECTOR_TEMP*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
x_np1 = &vector_xnp[x_id*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
}
#else
// i.e. [0] = first non ghost zone point
const double * __restrict__ x_n = &vector_base[VECTOR_TEMP*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
// i.e. [0] = first non ghost zone point
double * __restrict__ x_np1 = &vector_base[x_id*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#endif
#if defined(GSRB_FP)
#error GSRB_FP has not been implemented
#pragma omp parallel for \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE && GPU_ENABLE_SMOOTHER)
for(k=klo;k<khi;k++){
const double * __restrict__ RedBlack = RedBlack_FP +
ghosts*(1+jStride) +
kStride*((k^color000)&0x1);
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ij = i + j*jStride;
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(x_n);
double lambda = Dinv_ijk();
x_np1[ijk] = x_n[ijk] + RedBlack[ij]*lambda*(rhs[ijk]-Ax);
}
}
}
#elif defined(GSRB_STRIDE2)
#warning GSRB_STRIDE2 running on GPU does not give same error as CPU version, please use GSRB_BRANCH
#pragma omp parallel for \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE && GPU_ENABLE_SMOOTHER)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
#ifdef GSRB_OOP
// out-of-place must copy old value...
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
x_np1[ijk] = x_n[ijk];
}
#endif
for(i=ilo+((ilo^j^k^color000)&1);i<ihi;i+=2){ // stride-2 GSRB
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(x_n);
double lambda = Dinv_ijk();
x_np1[ijk] = x_n[ijk] + lambda*(rhs[ijk]-Ax);
}
}
}
#elif defined(GSRB_BRANCH)
#pragma omp parallel for \
collapse(3) if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE && GPU_ENABLE_SMOOTHER)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
if((i^j^k^color000^1)&1){ // looks very clean when [0] is i,j,k=0,0,0
double Ax = apply_op_ijk(x_n);
double lambda = Dinv_ijk();
x_np1[ijk] = x_n[ijk] + lambda*(rhs[ijk]-Ax);
#ifdef GSRB_OOP
}else{
x_np1[ijk] = x_n[ijk]; // copy old value when sweep color != cell color
#endif
}
}
}
}
#else
#error no GSRB implementation was specified
#endif
} // boxes
level->timers.smooth += (double)(getTime()-_timeStart);
} // s-loop
} //target data
}
//------------------------------------------------------------------------------------------------------------------------------
|
ssca2.c | /* =============================================================================
*
* ssca2.c
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include "computeGraph.h"
#include "cutClusters.h"
#include "defs.h"
#include "findSubGraphs.h"
#include "genScalData.h"
#include "getStartLists.h"
#include "getUserParameters.h"
#include "globals.h"
#include "timer.h"
#include "thread.h"
#include "tm.h"
/*
 * MAIN — HPCS SSCA#2 driver: generates scalable graph data, then runs the
 * enabled kernels (1: graph construction, 2: start-list extraction,
 * 3: sub-graph extraction, 4: clustering), timing each phase, and finally
 * frees everything. Entry point is the benchmark's MAIN() macro (see tm.h),
 * so the signature must stay exactly as-is.
 */
MAIN(argc, argv)
{
char exitmsg[1024];
GOTO_REAL();
load_syncchar_map("sync_char.map.ssca2");
/* BUGFIX: use snprintf — argv[0] can exceed the 1024-byte buffer. */
snprintf(exitmsg, sizeof(exitmsg), "END BENCHMARK %s-parallel-phase\n", argv[0]);
/*
 * Tuple for Scalable Data Generation
 * stores startVertex, endVertex, long weight and other info
 */
graphSDG* SDGdata;
/*
 * The graph data structure for this benchmark - see defs.h
 */
graph* G;
#ifdef ENABLE_KERNEL2
/*
 * Kernel 2
 */
edge* maxIntWtList;
edge* soughtStrWtList;
long maxIntWtListSize;
long soughtStrWtListSize;
#endif /* ENABLE_KERNEL2 */
#ifdef ENABLE_KERNEL3
# ifndef ENABLE_KERNEL2
# error KERNEL3 requires KERNEL2
# endif
/*
 * Kernel 3
 */
V* intWtVList = NULL;
V* strWtVList = NULL;
Vl** intWtVLList = NULL;
Vl** strWtVLList = NULL;
Vd* intWtVDList = NULL;
Vd* strWtVDList = NULL;
#endif /* ENABLE_KERNEL3 */
double totalTime = 0.0;
/* -------------------------------------------------------------------------
 * Preamble
 * -------------------------------------------------------------------------
 */
/*
 * User Interface: Configurable parameters, and global program control
 */
printf("\nHPCS SSCA #2 Graph Analysis Executable Specification:");
printf("\nRunning...\n\n");
getUserParameters(argc, (char** const) argv);
SIM_GET_NUM_CPU(THREADS);
TM_STARTUP(THREADS);
P_MEMORY_STARTUP(THREADS);
thread_startup(THREADS);
puts("");
printf("Number of processors: %ld\n", THREADS);
printf("Problem Scale: %ld\n", SCALE);
printf("Max parallel edges: %ld\n", MAX_PARAL_EDGES);
printf("Percent int weights: %f\n", PERC_INT_WEIGHTS);
printf("Probability unidirectional: %f\n", PROB_UNIDIRECTIONAL);
printf("Probability inter-clique: %f\n", PROB_INTERCL_EDGES);
printf("Subgraph edge length: %ld\n", SUBGR_EDGE_LENGTH);
printf("Kernel 3 data structure: %ld\n", K3_DS);
puts("");
/*
 * Scalable Data Generator
 */
printf("\nScalable Data Generator - genScalData() beginning execution...\n");
SDGdata = (graphSDG*)malloc(sizeof(graphSDG));
assert(SDGdata);
TIMER_T start;
TIMER_READ(start);
#ifdef USE_PARALLEL_DATA_GENERATION
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
genScalData((void*)SDGdata);
}
#else
thread_start(genScalData, (void*)SDGdata);
#endif
GOTO_REAL();
#else /* !USE_PARALLEL_DATA_GENERATION */
genScalData_seq(SDGdata);
#endif /* !USE_PARALLEL_DATA_GENERATION */
TIMER_T stop;
TIMER_READ(stop);
double time = TIMER_DIFF_SECONDS(start, stop);
totalTime += time;
printf("\nTime taken for Scalable Data Generation is %9.6f sec.\n\n", time);
printf("\n\tgenScalData() completed execution.\n");
#ifdef ENABLE_KERNEL1
/* -------------------------------------------------------------------------
 * Kernel 1 - Graph Construction
 *
 * From the input edges, construct the graph 'G'
 * -------------------------------------------------------------------------
 */
printf("\nKernel 1 - computeGraph() beginning execution...\n");
G = (graph*)malloc(sizeof(graph));
assert(G);
computeGraph_arg_t computeGraphArgs;
computeGraphArgs.GPtr = G;
computeGraphArgs.SDGdataPtr = SDGdata;
TIMER_READ(start);
OSA_PRINT("entering parallel phase\n",0);
START_INSTRUMENTATION();
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
computeGraph((void*)&computeGraphArgs);
}
#else
thread_start(computeGraph, (void*)&computeGraphArgs);
#endif
GOTO_REAL();
OSA_PRINT("exiting parallel phase\n",0);
OSA_PRINT(exitmsg,0);
STOP_INSTRUMENTATION();
TIMER_READ(stop);
time = TIMER_DIFF_SECONDS(start, stop);
totalTime += time;
printf("\n\tcomputeGraph() completed execution.\n");
printf("\nTime taken for kernel 1 is %9.6f sec.\n", time);
#endif /* ENABLE_KERNEL1 */
#ifdef ENABLE_KERNEL2
/* -------------------------------------------------------------------------
 * Kernel 2 - Find Max weight and sought string
 * -------------------------------------------------------------------------
 */
printf("\nKernel 2 - getStartLists() beginning execution...\n");
maxIntWtListSize = 0;
soughtStrWtListSize = 0;
maxIntWtList = (edge*)malloc(sizeof(edge));
assert(maxIntWtList);
soughtStrWtList = (edge*)malloc(sizeof(edge));
assert(soughtStrWtList);
getStartLists_arg_t getStartListsArg;
getStartListsArg.GPtr = G;
getStartListsArg.maxIntWtListPtr = &maxIntWtList;
getStartListsArg.maxIntWtListSize = &maxIntWtListSize;
getStartListsArg.soughtStrWtListPtr = &soughtStrWtList;
getStartListsArg.soughtStrWtListSize = &soughtStrWtListSize;
TIMER_READ(start);
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
getStartLists((void*)&getStartListsArg);
}
#else
thread_start(getStartLists, (void*)&getStartListsArg);
#endif
GOTO_REAL();
TIMER_READ(stop);
time = TIMER_DIFF_SECONDS(start, stop);
totalTime += time;
printf("\n\tgetStartLists() completed execution.\n");
printf("\nTime taken for kernel 2 is %9.6f sec.\n\n", time);
#endif /* ENABLE_KERNEL2 */
#ifdef ENABLE_KERNEL3
/* -------------------------------------------------------------------------
 * Kernel 3 - Graph Extraction
 *
 * K3_DS selects the result container: 0 = flat array, 1 = linked lists,
 * 2 = dynamic arrays.
 * -------------------------------------------------------------------------
 */
printf("\nKernel 3 - findSubGraphs() beginning execution...\n");
if (K3_DS == 0) {
intWtVList = (V*)malloc(G->numVertices * maxIntWtListSize * sizeof(V));
assert(intWtVList);
strWtVList = (V*)malloc(G->numVertices * soughtStrWtListSize * sizeof(V));
assert(strWtVList);
findSubGraphs0_arg_t findSubGraphs0Arg;
findSubGraphs0Arg.GPtr = G;
findSubGraphs0Arg.intWtVList = intWtVList;
findSubGraphs0Arg.strWtVList = strWtVList;
findSubGraphs0Arg.maxIntWtList = maxIntWtList;
findSubGraphs0Arg.maxIntWtListSize = maxIntWtListSize;
findSubGraphs0Arg.soughtStrWtList = soughtStrWtList;
findSubGraphs0Arg.soughtStrWtListSize = soughtStrWtListSize;
TIMER_READ(start);
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
findSubGraphs0((void*)&findSubGraphs0Arg);
}
#else
thread_start(findSubGraphs0, (void*)&findSubGraphs0Arg);
#endif
GOTO_REAL();
TIMER_READ(stop);
} else if (K3_DS == 1) {
intWtVLList = (Vl**)malloc(maxIntWtListSize * sizeof(Vl*));
assert(intWtVLList);
strWtVLList = (Vl**)malloc(soughtStrWtListSize * sizeof(Vl*));
assert(strWtVLList);
findSubGraphs1_arg_t findSubGraphs1Arg;
findSubGraphs1Arg.GPtr = G;
findSubGraphs1Arg.intWtVLList = intWtVLList;
findSubGraphs1Arg.strWtVLList = strWtVLList;
findSubGraphs1Arg.maxIntWtList = maxIntWtList;
findSubGraphs1Arg.maxIntWtListSize = maxIntWtListSize;
findSubGraphs1Arg.soughtStrWtList = soughtStrWtList;
findSubGraphs1Arg.soughtStrWtListSize = soughtStrWtListSize;
TIMER_READ(start);
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
findSubGraphs1((void*)&findSubGraphs1Arg);
}
#else
thread_start(findSubGraphs1, (void*)&findSubGraphs1Arg);
#endif
GOTO_REAL();
TIMER_READ(stop);
/* Verification
on_one_thread {
for (i=0; i<maxIntWtListSize; i++) {
printf("%ld -- ", i);
currV = intWtVLList[i];
while (currV != NULL) {
printf("[%ld %ld] ", currV->num, currV->depth);
currV = currV->next;
}
printf("\n");
}
for (i=0; i<soughtStrWtListSize; i++) {
printf("%ld -- ", i);
currV = strWtVLList[i];
while (currV != NULL) {
printf("[%ld %ld] ", currV->num, currV->depth);
currV = currV->next;
}
printf("\n");
}
}
*/
} else if (K3_DS == 2) {
intWtVDList = (Vd *) malloc(maxIntWtListSize * sizeof(Vd));
assert(intWtVDList);
strWtVDList = (Vd *) malloc(soughtStrWtListSize * sizeof(Vd));
assert(strWtVDList);
findSubGraphs2_arg_t findSubGraphs2Arg;
findSubGraphs2Arg.GPtr = G;
findSubGraphs2Arg.intWtVDList = intWtVDList;
findSubGraphs2Arg.strWtVDList = strWtVDList;
findSubGraphs2Arg.maxIntWtList = maxIntWtList;
findSubGraphs2Arg.maxIntWtListSize = maxIntWtListSize;
findSubGraphs2Arg.soughtStrWtList = soughtStrWtList;
findSubGraphs2Arg.soughtStrWtListSize = soughtStrWtListSize;
TIMER_READ(start);
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
findSubGraphs2((void*)&findSubGraphs2Arg);
}
#else
thread_start(findSubGraphs2, (void*)&findSubGraphs2Arg);
#endif
GOTO_REAL();
TIMER_READ(stop);
/* Verification */
/*
on_one_thread {
printf("\nInt weight sub-graphs \n");
for (i=0; i<maxIntWtListSize; i++) {
printf("%ld -- ", i);
for (j=0; j<intWtVDList[i].numArrays; j++) {
printf("\n [Array %ld] - \n", j);
for (k=0; k<intWtVDList[i].arraySize[j]; k++) {
printf("[%ld %ld] ", intWtVDList[i].vList[j][k].num, intWtVDList[i].vList[j][k].depth);
}
}
printf("\n");
}
printf("\nStr weight sub-graphs \n");
for (i=0; i<soughtStrWtListSize; i++) {
printf("%ld -- ", i);
for (j=0; j<strWtVDList[i].numArrays; j++) {
printf("\n [Array %ld] - \n", j);
for (k=0; k<strWtVDList[i].arraySize[j]; k++) {
printf("[%ld %ld] ", strWtVDList[i].vList[j][k].num, strWtVDList[i].vList[j][k].depth);
}
}
printf("\n");
}
}
*/
} else {
assert(0);
}
time = TIMER_DIFF_SECONDS(start, stop);
totalTime += time;
printf("\n\tfindSubGraphs() completed execution.\n");
printf("\nTime taken for kernel 3 is %9.6f sec.\n\n", time);
#endif /* ENABLE_KERNEL3 */
#ifdef ENABLE_KERNEL4
/* -------------------------------------------------------------------------
 * Kernel 4 - Graph Clustering
 * -------------------------------------------------------------------------
 */
printf("\nKernel 4 - cutClusters() beginning execution...\n");
TIMER_READ(start);
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
cutClusters((void*)G);
}
#else
thread_start(cutClusters, (void*)G);
#endif
GOTO_REAL();
TIMER_READ(stop);
time = TIMER_DIFF_SECONDS(start, stop);
totalTime += time;
printf("\n\tcutClusters() completed execution.\n");
printf("\nTime taken for Kernel 4 is %9.6f sec.\n\n", time);
#endif /* ENABLE_KERNEL4 */
printf("\nTime taken for all is %9.6f sec.\n\n", totalTime);
/* -------------------------------------------------------------------------
 * Cleanup
 *
 * NOTE(review): the G->... frees assume Kernel 1 ran (G allocated and
 * populated); the build always enables ENABLE_KERNEL1 in practice — confirm.
 * -------------------------------------------------------------------------
 */
P_FREE(G->outDegree);
P_FREE(G->outVertexIndex);
P_FREE(G->outVertexList);
P_FREE(G->paralEdgeIndex);
P_FREE(G->inDegree);
P_FREE(G->inVertexIndex);
P_FREE(G->inVertexList);
P_FREE(G->intWeight);
P_FREE(G->strWeight);
#ifdef ENABLE_KERNEL3
LONGINT_T i;
LONGINT_T j;
Vl* currV;
Vl* tempV;
if (K3_DS == 0) {
P_FREE(strWtVList);
P_FREE(intWtVList);
}
if (K3_DS == 1) {
/* Free every node of every per-start-vertex linked list, then the list arrays. */
for (i = 0; i < maxIntWtListSize; i++) {
currV = intWtVLList[i];
while (currV != NULL) {
tempV = currV->next;
P_FREE(currV);
currV = tempV;
}
}
for (i = 0; i < soughtStrWtListSize; i++) {
currV = strWtVLList[i];
while (currV != NULL) {
tempV = currV->next;
P_FREE(currV);
currV = tempV;
}
}
P_FREE(strWtVLList);
P_FREE(intWtVLList);
}
if (K3_DS == 2) {
/* Free each dynamic array's chunks, then its bookkeeping, then the tables. */
for (i = 0; i < maxIntWtListSize; i++) {
for (j = 0; j < intWtVDList[i].numArrays; j++) {
P_FREE(intWtVDList[i].vList[j]);
}
P_FREE(intWtVDList[i].vList);
P_FREE(intWtVDList[i].arraySize);
}
for (i = 0; i < soughtStrWtListSize; i++) {
for (j = 0; j < strWtVDList[i].numArrays; j++) {
P_FREE(strWtVDList[i].vList[j]);
}
P_FREE(strWtVDList[i].vList);
P_FREE(strWtVDList[i].arraySize);
}
P_FREE(strWtVDList);
P_FREE(intWtVDList);
}
#endif /* ENABLE_KERNEL3 */
#ifdef ENABLE_KERNEL2
/* BUGFIX: these two lists are allocated under ENABLE_KERNEL2, but were
 * previously freed inside the ENABLE_KERNEL3 block (whose #endif was even
 * mislabeled "ENABLE_KERNEL2"), leaking them whenever KERNEL2 was enabled
 * without KERNEL3. Free them under the same guard that allocates them. */
P_FREE(soughtStrWtList);
P_FREE(maxIntWtList);
#endif /* ENABLE_KERNEL2 */
P_FREE(SOUGHT_STRING);
P_FREE(G);
P_FREE(SDGdata);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
GOTO_SIM();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of ssca2.c
*
* =============================================================================
*/
|
GB_binop__iseq_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int8)
// A*D function (colscale): GB (_AxD__iseq_int8)
// D*A function (rowscale): GB (_DxB__iseq_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int8)
// C=scalar+B GB (_bind1st__iseq_int8)
// C=scalar+B' GB (_bind1st_tran__iseq_int8)
// C=A+scalar GB (_bind2nd__iseq_int8)
// C=A'+scalar GB (_bind2nd_tran__iseq_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT8 || GxB_NO_ISEQ_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator. The loop body
// comes from the included template, specialized by the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C. B_ek_slicing partitions B's
// entries into B_ntasks tasks for B_nthreads threads (template consumes them).
GrB_Info GB (_Cdense_accumB__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this specialization was compiled out; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b (passed type-erased via p_bwork) into
// every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__iseq_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the braces above always return. Harmless
// quirk of the code generator; do not hand-edit this generated file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j).
GrB_Info GB (_AxD__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i).
GrB_Info GB (_DxB__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M / !M). For eWiseUnion, entries
// present in only one input pair with the supplied alpha/beta scalars.
GrB_Info GB (_AaddB__iseq_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, allocated/freed by the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// unbox the type-erased fill scalars only when eWiseUnion needs them
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with C sparse/hypersparse, optional mask.
GrB_Info GB (_AemultB_08__iseq_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. flipxy selects fmult(y,x) when the op has no flipped variant.
GrB_Info GB (_AemultB_02__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C = A.*B (optionally masked) where C is bitmap.
GrB_Info GB (_AemultB_bitmap__iseq_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every present entry of B; x is the bound first
// argument. Bb is B's bitmap (NULL for full), bnz the entry count.
GrB_Info GB (_bind1st__iseq_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions absent from the bitmap (GBB is always true for full B)
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every present entry of A; y is the bound second
// argument. Ab is A's bitmap (NULL for full), anz the entry count.
GrB_Info GB (_bind2nd__iseq_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions absent from the bitmap (GBB is always true for full A)
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x == aij) via the
// GB_CAST_OP macro defined just above; the loop lives in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__iseq_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A while applying cij = (aij == y) via the
// GB_CAST_OP macro defined just above; the loop lives in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolutiondepthwisebnrelu_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 stride-1 convolution fused with batch-norm and ReLU.
// One input channel maps to one output channel (group == channels).
// Rows are processed two at a time to reuse the overlapping input rows
// r1/r2 between the two output rows; a scalar tail loop handles the last
// odd row. BN+ReLU is applied in a second pass over the finished channel.
static void convdwbnrelu3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& _a_data, const Mat& _b_data)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
const float* a_data = _a_data;
const float* b_data = _b_data;
// one channel (group) per OpenMP task; channels are independent
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
// per-channel batch-norm coefficients: output = b*conv + a (applied below)
float a = a_data[g];
float b = b_data[g];
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*9;      // 3x3 = 9 weights per channel
float* outptr = out;
float* outptr2 = outptr + outw;           // second output row of the pair
const float* img0 = bottom_blob.channel(g);
// four consecutive input rows feed two output rows (3 taps each, 1 shared pair)
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i+1 < outh; i+=2)
{
int remain = outw;
for (; remain>0; remain--)
{
// output row i: taps over input rows r0/r1/r2
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
// output row i+1: same taps shifted one input row down (r1/r2/r3)
float sum2 = bias0;
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
// advance all pointers past the 2-pixel border and one extra row
// (w == outw + 2 for a 3x3 stride-1 valid convolution)
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
// tail: remaining single output row (when outh is odd)
for (; i < outh; i++)
{
int remain = outw;
for (; remain>0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
// second pass: fused batch-norm (b*x + a) then ReLU over the whole channel
// NOTE(review): a/b roles (slope vs. shift) are set by the caller that
// precomputed _a_data/_b_data — confirm against the layer's load code.
int remain = outw * outh;
outptr = out;
for (; remain>0; remain--)
{
*outptr = b*(*outptr)+a;
if(*outptr < 0)
*outptr = 0;
outptr++;
}
}
}
static void convdwbnrelu3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& _a_data, const Mat& _b_data)
{
    // Depthwise 3x3 stride-2 convolution with a fused batch-norm (y = b*x + a)
    // and ReLU pass, one channel (group) per parallel iteration.
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // step from the end of one sampled row to the start of the next:
    // undo the horizontal overshoot (w - 2*outw) and skip a full input row
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;
    const float* a_data = _a_data;
    const float* b_data = _b_data;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const float a = a_data[g];
        const float b = b_data[g];
        const float bias0 = bias ? bias[g] : 0.f;

        // 3x3 kernel of this channel, one pointer per kernel row
        const float* kern = kernel + g * 9;
        const float* kr0 = kern;
        const float* kr1 = kern + 3;
        const float* kr2 = kern + 6;

        float* dst = out;

        // three sliding input rows covering the vertical kernel extent
        const float* img = bottom_blob.channel(g);
        const float* row0 = img;
        const float* row1 = img + w;
        const float* row2 = img + w * 2;

        for (int oy = 0; oy < outh; oy++)
        {
            for (int ox = 0; ox < outw; ox++)
            {
                float acc = bias0;
                acc += row0[0] * kr0[0];
                acc += row0[1] * kr0[1];
                acc += row0[2] * kr0[2];
                acc += row1[0] * kr1[0];
                acc += row1[1] * kr1[1];
                acc += row1[2] * kr1[2];
                acc += row2[0] * kr2[0];
                acc += row2[1] * kr2[1];
                acc += row2[2] * kr2[2];

                *dst++ = acc;

                // horizontal stride of 2
                row0 += 2;
                row1 += 2;
                row2 += 2;
            }

            row0 += tailstep;
            row1 += tailstep;
            row2 += tailstep;
        }

        // fused batch-norm + ReLU over the finished channel
        float* p = out;
        for (int n = outw * outh; n > 0; n--)
        {
            float v = b * (*p) + a;
            *p = v < 0 ? 0.f : v;
            p++;
        }
    }
}
|
modal_analysis_builder_and_solver.h | /*
==============================================================================
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: janosch $
* Date: $Date: 2008-04-29 12:23:09 $
* Revision: $Revision: 1.1 $
*
* ***********************************************************/
#if !defined(KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER )
#define KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <omp.h>
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "linear_solvers/power_iteration_eigenvalue_solver.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
Current class provides an implementation for standard builder and solving operations.
the RHS is constituted by the unbalanced loads (residual)
Degrees of freedom are reordered putting the restrained degrees of freedom at
the end of the system ordered in reverse order with respect to the DofSet.
Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
this information.
Calculation of the reactions involves a cost very similiar to the calculation of the total residual
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace , //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ModalAnalysisBuilderAndSolver
: public BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
//typedef boost::shared_ptr< ModalAnalysisBuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver> > Pointer;
KRATOS_CLASS_POINTER_DEFINITION( ModalAnalysisBuilderAndSolver );
typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor.
*/
ModalAnalysisBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver)
{
/* std::cout << "using the standard builder and solver " << std::endl; */
}
/** Destructor.
*/
virtual ~ModalAnalysisBuilderAndSolver(){}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
/**
 * Assembles the global LHS matrix A and RHS vector b from all elements and
 * conditions of the model part, in parallel.
 *
 * The element (and condition) arrays are split into one contiguous
 * [begin,end) slice per OpenMP thread via CreatePartition; each thread
 * computes local contributions and commits them to the shared A and b
 * inside a critical section, so the global containers are never written
 * concurrently.
 *
 * @param pScheme      solution scheme providing the local contributions
 *                     (must not be null, otherwise KRATOS_ERROR is raised)
 * @param r_model_part model part holding the elements and conditions
 * @param A            global system matrix, assembled in place
 * @param b            global right-hand-side vector, assembled in place
 */
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& b)
{
KRATOS_TRY
if(!pScheme)
KRATOS_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(BaseType::mReactionsVector);
//create a partition of the element array: one contiguous slice per thread
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
double start_prod = omp_get_wtime();
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//thread-local contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];
// assemble all elements of this thread's slice
for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution (EquationId receives the DOF localization)
pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
//writes into the shared A and b must be serialized
#pragma omp critical
{
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
}
}
vector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//thread-local contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];
// assemble all conditions of this thread's slice
for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate condition contribution (EquationId receives the DOF localization)
pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
//writes into the shared A and b must be serialized
#pragma omp critical
{
//assemble the condition contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
}
}
}
double stop_prod = omp_get_wtime();
std::cout << "time: " << stop_prod - start_prod << std::endl;
KRATOS_WATCH("finished parallel building");
/* LHS_Contribution.resize(0,0,false);
RHS_Contribution.resize(0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
}
*/
//for( int i=0; i<A.size1(); i++ )
//{
// for( int j=0; j<A.size2(); j++ )
// {
// std::cout << A(i,j);
// }
// std::cout << std::endl;
//}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Serially assembles only the left-hand-side matrix A from all element and
 * condition contributions of the model part (free DOFs only, see AssembleLHS).
 */
void BuildLHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemMatrixType& A)
{
    KRATOS_TRY

    ElementsArrayType& rElements = r_model_part.Elements();
    ConditionsArrayType& rConditions = r_model_part.Conditions();

    // reactions are rebuilt from scratch on every assembly
    TSparseSpace::SetToZero(BaseType::mReactionsVector);

    // scratch storage reused across both loops
    LocalSystemMatrixType lhs_contribution(0,0);
    Element::EquationIdVectorType equation_ids;
    ProcessInfo& rCurrentProcessInfo = r_model_part.GetProcessInfo();

    // element contributions
    for (typename ElementsArrayType::ptr_iterator ie = rElements.ptr_begin(); ie != rElements.ptr_end(); ++ie)
    {
        pScheme->Calculate_LHS_Contribution(*ie, lhs_contribution, equation_ids, rCurrentProcessInfo);
        AssembleLHS(A, lhs_contribution, equation_ids);
        // release per-element scratch held by the scheme
        pScheme->CleanMemory(*ie);
    }

    lhs_contribution.resize(0,0,false);

    // condition contributions
    for (typename ConditionsArrayType::ptr_iterator ic = rConditions.ptr_begin(); ic != rConditions.ptr_end(); ++ic)
    {
        pScheme->Condition_Calculate_LHS_Contribution(*ic, lhs_contribution, equation_ids, rCurrentProcessInfo);
        AssembleLHS(A, lhs_contribution, equation_ids);
    }

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Serially assembles the LHS matrix A keeping, for every free-DOF row, the
 * complete set of columns (including fixed-DOF columns); see
 * AssembleLHS_CompleteOnFreeRows.
 */
void BuildLHS_CompleteOnFreeRows(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemMatrixType& A)
{
    KRATOS_TRY

    ElementsArrayType& rElements = r_model_part.Elements();
    ConditionsArrayType& rConditions = r_model_part.Conditions();
    ProcessInfo& rCurrentProcessInfo = r_model_part.GetProcessInfo();

    // reactions are rebuilt from scratch on every assembly
    TSparseSpace::SetToZero(BaseType::mReactionsVector);

    // scratch storage reused across both loops
    LocalSystemMatrixType lhs_contribution(0,0);
    Element::EquationIdVectorType equation_ids;

    // element contributions
    for (typename ElementsArrayType::ptr_iterator ie = rElements.ptr_begin(); ie != rElements.ptr_end(); ++ie)
    {
        pScheme->Calculate_LHS_Contribution(*ie, lhs_contribution, equation_ids, rCurrentProcessInfo);
        AssembleLHS_CompleteOnFreeRows(A, lhs_contribution, equation_ids);
        // release per-element scratch held by the scheme
        pScheme->CleanMemory(*ie);
    }

    lhs_contribution.resize(0,0,false);

    // condition contributions
    for (typename ConditionsArrayType::ptr_iterator ic = rConditions.ptr_begin(); ic != rConditions.ptr_end(); ++ic)
    {
        pScheme->Condition_Calculate_LHS_Contribution(*ic, lhs_contribution, equation_ids, rCurrentProcessInfo);
        AssembleLHS_CompleteOnFreeRows(A, lhs_contribution, equation_ids);
    }

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Solves A*Dx = b with the linear solver held by the base class.
 * When b is empty or identically zero, Dx is simply zeroed out instead of
 * invoking the solver. Prints the wall-clock solve time.
 */
void SystemSolve(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b
)
{
    KRATOS_TRY

    double start_solve = omp_get_wtime();

    const double norm_b = (b.size() != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00)
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    else
        TSparseSpace::SetToZero(Dx);

    // print solver information when a higher echo level is requested
    if (BaseType::GetEchoLevel() > 1)
    {
        std::cout << *(BaseType::mpLinearSystemSolver) << std::endl;
    }

    double stop_solve = omp_get_wtime();
    std::cout << "time: " << stop_solve - start_solve << std::endl;

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Builds the stiffness matrix A and the mass matrix M, then solves the
 * generalized eigenvalue problem with a power-iteration solver
 * (tolerance 1.0e-8, max 1000 iterations, 1 requested eigenvalue).
 *
 * NOTE(review): the eigenvalue branch does not write Dx or b; the direct
 * linear solve is intentionally commented out below. The computed
 * eigenpair is only printed at echo level 3 — confirm callers do not
 * expect results in Dx.
 */
void BuildAndSolve( typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{
KRATOS_TRY
boost::timer building_time;
//construct mass matrix with the same dimensions as the stiffness matrix
TSystemMatrixType M = TSystemMatrixType( A.size1(), A.size2() );
//build matrices
BuildSystemMatrices( pScheme, r_model_part, A, M );
//elapsed time
if(BaseType::GetEchoLevel()>0)
{
std::cout << "Building Time : " << building_time.elapsed() << std::endl;
}
if (BaseType::GetEchoLevel()== 3)
{
std::cout << "before the solution of the system" << std::endl;
std::cout << "stiffness Matrix = " << A << std::endl;
std::cout << "mass Matrix = " << M << std::endl;
std::cout << "unknowns vector = " << Dx << std::endl;
std::cout << "RHS vector = " << b << std::endl;
}
boost::timer solve_time;
// SystemSolve(A,Dx,b);
//power iteration: tolerance 1e-8, max 1000 iterations, 1 required eigenvalue
PowerIterationEigenvalueSolver<TSparseSpace, TDenseSpace, TLinearSolver>
eigenvalue_solver( 1.0e-8, 1000, 1, BaseType::mpLinearSystemSolver );
LocalSystemVectorType Eigenvalues(1);
LocalSystemMatrixType Eigenvectors(1,1);
eigenvalue_solver.Solve( A, M, Eigenvalues, Eigenvectors);
if(BaseType::GetEchoLevel()>0)
{
std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl;
}
if (BaseType::GetEchoLevel()== 3)
{
std::cout << "after the solution of the system" << std::endl;
std::cout << "Eigenvalues = " << Eigenvalues << std::endl;
std::cout << "Eigenvectors = " << Eigenvectors << std::endl;
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Convenience routine: rebuilds the right-hand-side vector b and then
 * solves A*Dx = b via SystemSolve. The matrix A is used as-is.
 */
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
BuildRHS(pScheme,r_model_part,b);
SystemSolve(A,Dx,b);
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Assembles the global right-hand-side vector b from all element and
 * condition contributions. When reaction computation is enabled, the
 * contributions of fixed DOFs are diverted into the reactions vector
 * (see AssembleRHS).
 *
 * Cleanup: the previous version declared and resized a LocalSystemMatrixType
 * LHS_Contribution that was never used anywhere in this routine — removed.
 */
void BuildRHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemVectorType& b)
{
    KRATOS_TRY

    //Getting the Elements
    ElementsArrayType& pElements = r_model_part.Elements();
    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = r_model_part.Conditions();

    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

    //resetting to zero the vector of reactions
    TSparseSpace::SetToZero(BaseType::mReactionsVector);

    //local contribution, reused across both loops
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

    //vector containing the localization in the system of the different terms
    Element::EquationIdVectorType EquationId;

    // assemble all elements
    for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
    {
        //calculate elemental Right Hand Side Contribution
        pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
        //assemble the elemental contribution
        AssembleRHS(b,RHS_Contribution,EquationId);
    }

    RHS_Contribution.resize(0,false);

    // assemble all conditions
    for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
    {
        //calculate condition contribution
        pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
        //assemble the condition contribution
        AssembleRHS(b,RHS_Contribution,EquationId);
    }

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Collects every degree of freedom involved in the analysis by querying all
 * elements and conditions, removes duplicates, and stores the result in the
 * base-class DOF set. Raises a KRATOS_ERROR when no DOFs are found.
 */
void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part )
{
    KRATOS_TRY

    KRATOS_WATCH("setting up the dofs");

    ProcessInfo& rCurrentProcessInfo = r_model_part.GetProcessInfo();
    Element::DofsVectorType dof_list;
    DofsArrayType all_dofs;

    // start from an empty set
    BaseType::mDofSet = DofsArrayType();

    // gather the DOFs of every element
    ElementsArrayType& rElements = r_model_part.Elements();
    for (typename ElementsArrayType::ptr_iterator ie = rElements.ptr_begin(); ie != rElements.ptr_end(); ++ie)
    {
        pScheme->GetElementalDofList(*ie, dof_list, rCurrentProcessInfo);
        for (typename Element::DofsVectorType::iterator id = dof_list.begin(); id != dof_list.end(); ++id)
        {
            all_dofs.push_back(*id);
        }
    }

    // ...and of every condition
    ConditionsArrayType& rConditions = r_model_part.Conditions();
    for (typename ConditionsArrayType::ptr_iterator ic = rConditions.ptr_begin(); ic != rConditions.ptr_end(); ++ic)
    {
        pScheme->GetConditionDofList(*ic, dof_list, rCurrentProcessInfo);
        for (typename Element::DofsVectorType::iterator id = dof_list.begin(); id != dof_list.end(); ++id)
        {
            all_dofs.push_back(*id);
        }
    }

    // drop duplicates and publish the final set
    all_dofs.Unique();
    BaseType::mDofSet = all_dofs;

    // an analysis without degrees of freedom makes no sense
    if (BaseType::mDofSet.size() == 0)
        KRATOS_ERROR(std::logic_error, "No degrees of freedom!", "");

    BaseType::mDofSetIsInitialized = true;

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Assigns an equation id to every DOF: free DOFs are numbered upwards from
 * zero, fixed DOFs downwards from the total DOF count, so the restrained
 * DOFs end up (in reverse order) at the tail of the system. Afterwards
 * mEquationSystemSize equals the number of free DOFs, and any equation id
 * >= mEquationSystemSize identifies a restrained DOF.
 */
void SetUpSystem(
    ModelPart& r_model_part
)
{
    int next_free_id = 0;
    int next_fixed_id = BaseType::mDofSet.size();

    for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
    {
        if (dof_iterator->IsFixed())
            dof_iterator->SetEquationId(--next_fixed_id);
        else
            dof_iterator->SetEquationId(next_free_id++);
    }

    // number of free equations = where the fixed numbering stopped
    BaseType::mEquationSystemSize = next_fixed_id;
}
//**************************************************************************
//**************************************************************************
/**
 * Ensures A, Dx and b match the free-DOF system size. The sparse structure
 * of A is (re)constructed on first use, when the reshape flag is set, or
 * when the stored shape no longer matches. The reactions vector is sized to
 * the number of fixed DOFs when reaction computation is requested.
 */
void ResizeAndInitializeVectors(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ElementsArrayType& rElements,
    ConditionsArrayType& rConditions,
    ProcessInfo& CurrentProcessInfo
)
{
    KRATOS_TRY

    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true)
    {
        // matrix not yet initialized (or the caller forces a reshape):
        // build the sparsity pattern from scratch
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(A, rElements, rConditions, CurrentProcessInfo);
    }
    else if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
    {
        // existing matrix has the wrong shape: slow path, contents preserved
        KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
        ConstructMatrixStructure(A, rElements, rConditions, CurrentProcessInfo);
    }

    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    if (b.size() != BaseType::mEquationSystemSize)
        b.resize(BaseType::mEquationSystemSize, false);

    // if needed, resize the vector used for the calculation of reactions
    if (BaseType::mCalculateReactionsFlag == true)
    {
        unsigned int reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
        if (BaseType::mReactionsVector.size() != reactions_vector_size)
            BaseType::mReactionsVector.resize(reactions_vector_size, false);
    }

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Hook called at the beginning of each solution step. The modal analysis
 * builder needs no per-step initialization; the method exists only to
 * satisfy the BuilderAndSolver interface.
 */
void InitializeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Hook called at the end of each solution step. Intentionally empty for the
 * modal analysis builder; present to satisfy the BuilderAndSolver interface.
 */
void FinalizeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
}
//**************************************************************************
//**************************************************************************
/**
 * Computes the reactions on the fixed DOFs. The RHS is rebuilt first so
 * that BaseType::mReactionsVector holds the up-to-date fixed-DOF residuals
 * (accumulated by AssembleRHS); those values are then copied into the
 * reaction variable of each fixed DOF.
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b)
{
    //refresh RHS to have the correct reactions
    BuildRHS(pScheme, r_model_part, b);

    // equation ids >= systemsize belong to fixed DOFs; their offset into
    // the reactions vector is (equation id - systemsize)
    const int systemsize = BaseType::mDofSet.size() - BaseType::mReactionsVector.size();

    for (typename DofsArrayType::ptr_iterator it = BaseType::mDofSet.ptr_begin(); it != BaseType::mDofSet.ptr_end(); ++it)
    {
        if ((*it)->IsFixed())
        {
            int reaction_index = (*it)->EquationId();
            reaction_index -= systemsize;
            (*it)->GetSolutionStepReactionValue() = BaseType::mReactionsVector[reaction_index];
        }
    }
}
//**************************************************************************
//**************************************************************************
/**
 * No-op: Dirichlet (fixed-DOF) conditions need no extra treatment here
 * because the assembly routines already skip fixed DOFs. Present to
 * satisfy the BuilderAndSolver interface.
 */
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{}
//**************************************************************************
//**************************************************************************
/**
 * No-op: point loads receive no special treatment in this builder.
 * Present to satisfy the BuilderAndSolver interface.
 */
void ApplyPointLoads(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemVectorType& b)
{}
/**
this function is intended to be called at the end of the solution step to clean up memory
storage not needed
*/
/**
 * Releases the memory held by the builder at the end of the solution:
 * the DOF set and the reactions vector are replaced with empty instances.
 */
void Clear()
{
    this->mDofSet = DofsArrayType();
    this->mReactionsVector = TSystemVectorType();

    if (this->GetEchoLevel() > 0)
    {
        KRATOS_WATCH("ModalAnalysisBuilderAndSolver Clear Function called");
    }
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
//**************************************************************************
/**
 * Builds the sparsity pattern ("graph") of A: for every element/condition,
 * each pair (i,j) of its free equation ids marks a nonzero. The per-row
 * column lists are deduplicated with AddUnique, sorted, and pushed into A
 * with value zero. Equation ids >= A.size1() (fixed DOFs) are skipped, so
 * only the free-DOF block is structured.
 */
virtual void ConstructMatrixStructure(
TSystemMatrixType& A,
ElementsContainerType& rElements,
ConditionsArrayType& rConditions,
ProcessInfo& CurrentProcessInfo)
{
std::size_t equation_size = A.size1();
//one column-index list per matrix row
std::vector<std::vector<std::size_t> > indices(equation_size);
// std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix));
Element::EquationIdVectorType ids(3,0);
//collect the coupled equation ids of every element
for(typename ElementsContainerType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; i_element++)
{
(i_element)->EquationIdVector(ids, CurrentProcessInfo);
for(std::size_t i = 0 ; i < ids.size() ; i++)
if(ids[i] < equation_size)
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for(std::size_t j = 0 ; j < ids.size() ; j++)
if(ids[j] < equation_size)
{
AddUnique(row_indices,ids[j]);
//indices[ids[i]].push_back(ids[j]);
}
}
}
//collect the coupled equation ids of every condition
for(typename ConditionsArrayType::iterator i_condition = rConditions.begin() ; i_condition != rConditions.end() ; i_condition++)
{
(i_condition)->EquationIdVector(ids, CurrentProcessInfo);
for(std::size_t i = 0 ; i < ids.size() ; i++)
if(ids[i] < equation_size)
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for(std::size_t j = 0 ; j < ids.size() ; j++)
if(ids[j] < equation_size)
{
AddUnique(row_indices,ids[j]);
// indices[ids[i]].push_back(ids[j]);
}
}
}
//allocating the memory needed: total number of nonzero entries
int data_size = 0;
for(std::size_t i = 0 ; i < indices.size() ; i++)
{
data_size += indices[i].size();
}
A.reserve(data_size,false);
//filling with zero the matrix (creating the structure)
for(std::size_t i = 0 ; i < indices.size() ; i++)
{
std::vector<std::size_t>& row_indices = indices[i];
//entries are pushed in ascending column order, hence the sort
std::sort(row_indices.begin(), row_indices.end());
for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
{
A.push_back(i,*it,0.00);
// A()(i,*it) = 0.00;
}
//row_indices = std::vector<std::size_t>();
//free each row list as soon as it has been consumed
row_indices.clear();
}
}
//**************************************************************************
/**
 * Adds a local (element/condition) matrix into the global matrix A.
 * Rows and columns whose equation id is >= mEquationSystemSize (fixed
 * DOFs) are skipped entirely.
 */
void AssembleLHS(
    TSystemMatrixType& A,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId
)
{
    const unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        const unsigned int i_global = EquationId[i_local];
        if (i_global >= BaseType::mEquationSystemSize)
            continue; // fixed-DOF row: no entry in the free system

        for (unsigned int j_local = 0; j_local < local_size; j_local++)
        {
            const unsigned int j_global = EquationId[j_local];
            if (j_global < BaseType::mEquationSystemSize)
                A(i_global, j_global) += LHS_Contribution(i_local, j_local);
        }
    }
}
//**************************************************************************
/**
 * Adds a local RHS vector into the global vector b. Free-DOF entries always
 * go into b; fixed-DOF entries are accumulated with opposite sign into the
 * reactions vector when reaction computation is enabled, and dropped
 * otherwise.
 */
void AssembleRHS(
    TSystemVectorType& b,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
)
{
    const unsigned int local_size = RHS_Contribution.size();

    if (BaseType::mCalculateReactionsFlag == false)
    {
        // reactions not requested: only free DOFs are assembled
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            const unsigned int i_global = EquationId[i_local];
            if (i_global < BaseType::mEquationSystemSize)
                b[i_global] += RHS_Contribution[i_local];
        }
    }
    else
    {
        // reactions requested: route fixed-DOF terms into the reactions vector
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            const unsigned int i_global = EquationId[i_local];
            if (i_global < BaseType::mEquationSystemSize)
            {
                // "free" DOF: assemble into the system vector
                b[i_global] += RHS_Contribution[i_local];
            }
            else
            {
                // "fixed" DOF: accumulate into the vector of reactions
                BaseType::mReactionsVector[i_global - BaseType::mEquationSystemSize] -= RHS_Contribution[i_local];
            }
        }
    }
}
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
//**************************************************************************
//**************************************************************************
/**
 * Assembles in parallel the stiffness matrix K and the mass matrix M used
 * for the modal (generalized eigenvalue) problem. The element/condition
 * arrays are partitioned per OpenMP thread; assembly into the shared
 * matrices happens inside a critical section.
 *
 * BUGFIX: unlike Build() — where the scheme fills EquationId inside
 * CalculateSystemContributions — this routine calls the element/condition
 * methods directly, and EquationId was never populated, so AssembleLHS
 * received an empty id vector and no entries were ever assembled.
 * EquationIdVector() is now queried before each assembly.
 */
void BuildSystemMatrices(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemMatrixType& K,
    TSystemMatrixType& M )
{
    KRATOS_TRY

    if(!pScheme)
        KRATOS_ERROR(std::runtime_error, "No scheme provided!", "");

    //getting the elements from the model
    ElementsArrayType& pElements = r_model_part.Elements();
    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = r_model_part.Conditions();

    //resetting to zero the vector of reactions
    TSparseSpace::SetToZero(BaseType::mReactionsVector);

    //create a partition of the element array: one contiguous slice per thread
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );

    double start_prod = omp_get_wtime();

    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        //thread-local contributions to the system
        LocalSystemMatrixType K_Contribution = LocalSystemMatrixType(0,0);
        LocalSystemMatrixType M_Contribution = LocalSystemMatrixType(0,0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        //localization of the local entries in the global system
        Element::EquationIdVectorType EquationId;
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        typename ElementsArrayType::ptr_iterator
        it_begin=pElements.ptr_begin()+element_partition[k];
        typename ElementsArrayType::ptr_iterator
        it_end=pElements.ptr_begin()+element_partition[k+1];

        // assemble all elements of this thread's slice
        for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
        {
            //calculate elemental contributions
            (*it)->MassMatrix( M_Contribution, CurrentProcessInfo );
            (*it)->CalculateLocalSystem( K_Contribution,RHS_Contribution,CurrentProcessInfo );
            //fetch the global equation ids of the element's DOFs (bugfix)
            (*it)->EquationIdVector( EquationId, CurrentProcessInfo );
            //writes into the shared K and M must be serialized
            #pragma omp critical
            {
                //assemble the elemental contribution
                AssembleLHS(K,K_Contribution,EquationId);
                AssembleLHS(M,M_Contribution,EquationId);
                // clean local elemental memory
                pScheme->CleanMemory(*it);
            }
        }
    }

    vector<unsigned int> condition_partition;
    CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);

    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        //thread-local contributions to the system
        LocalSystemMatrixType K_Contribution = LocalSystemMatrixType(0,0);
        LocalSystemMatrixType M_Contribution = LocalSystemMatrixType(0,0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        Condition::EquationIdVectorType EquationId;
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        typename ConditionsArrayType::ptr_iterator
        it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
        typename ConditionsArrayType::ptr_iterator
        it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];

        // assemble all conditions of this thread's slice
        for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
        {
            //calculate condition contributions
            (*it)->MassMatrix( M_Contribution, CurrentProcessInfo );
            (*it)->CalculateLocalSystem( K_Contribution,RHS_Contribution,CurrentProcessInfo );
            //fetch the global equation ids of the condition's DOFs (bugfix)
            (*it)->EquationIdVector( EquationId, CurrentProcessInfo );
            //writes into the shared K and M must be serialized
            #pragma omp critical
            {
                //assemble the condition contribution
                AssembleLHS(K,K_Contribution,EquationId);
                AssembleLHS(M,M_Contribution,EquationId);
            }
        }
    }

    double stop_prod = omp_get_wtime();
    std::cout << "time: " << stop_prod - start_prod << std::endl;
    KRATOS_WATCH("finished parallel building");

    KRATOS_CATCH("")
}
//**************************************************************************
// Adds a local LHS contribution into the global matrix A, assembling only
// the rows whose global equation id belongs to the free part of the system
// (ids below mEquationSystemSize); every column of such a row is assembled.
void AssembleLHS_CompleteOnFreeRows(
    TSystemMatrixType& A,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId
)
{
    const unsigned int local_size = LHS_Contribution.size1();
    for (unsigned int row = 0; row < local_size; row++)
    {
        const unsigned int global_row = EquationId[row];
        if (global_row >= BaseType::mEquationSystemSize)
            continue; // constrained dof: skip the whole row
        for (unsigned int col = 0; col < local_size; col++)
        {
            A(global_row, EquationId[col]) += LHS_Contribution(row, col);
        }
    }
}
//******************************************************************************************
//******************************************************************************************
//******************************************************************************************
// Appends `candidate` to `v` only when it is not already stored,
// keeping `v` free of duplicates (linear scan, order preserved).
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    for (std::vector<std::size_t>::const_iterator it = v.begin(); it != v.end(); ++it)
    {
        if (*it == candidate)
            return; // already present, nothing to do
    }
    v.push_back(candidate);
}
//******************************************************************************************
//******************************************************************************************
//******************************************************************************************
// Splits `number_of_rows` items into `number_of_threads` contiguous,
// near-equal ranges: thread k owns [partitions[k], partitions[k+1]).
// The remainder of the integer division is absorbed by the last range.
// Precondition: number_of_threads >= 1 (division below would otherwise be UB).
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads+1);
    const unsigned int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    // fix: loop index is unsigned to match number_of_threads; the old `int i`
    // caused a signed/unsigned comparison (implicit conversion each iteration)
    for(unsigned int i = 1; i<number_of_threads; i++)
        partitions[i] = partitions[i-1] + partition_size ;
}
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class ModalAnalysisBuilderAndSolver */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER defined */
|
host_as_target.c | // Check that specifying device as omp_get_initial_device():
// - Doesn't cause the runtime to fail.
// - Offloads code to the host.
// - Doesn't transfer data. In this case, just check that neither host data nor
// default device data are affected by the specified transfers.
// - Works whether it's specified directly or as the default device.
// RUN: %libomptarget-compile-run-and-check-generic
// amdgpu does not have a working printf definition
// XFAIL: amdgcn-amd-amdhsa
// XFAIL: amdgcn-amd-amdhsa-newRTL
#include <stdio.h>
#include <omp.h>
/* Print X as seen from the host, then from a target region offloaded to
   device Dev.  These two printfs produce the "host X" / "device X" lines
   that the CHECK directives in main() match against, so the format strings
   must not change. */
static void check(char *X, int Dev) {
printf(" host X = %c\n", *X);
#pragma omp target device(Dev)
printf("device X = %c\n", *X);
}
#define CHECK_DATA() check(&X, DevDefault)
/* Driver: exercise every directive class (target, target teams, target data,
   enter/exit data, update) against the initial device, first via an explicit
   device(DevInit) clause and then with DevInit as the default device.  Host
   data holds 'h' and default-device data holds 'd' throughout; none of the
   host-targeted transfers may disturb either copy. */
int main(void) {
int DevDefault = omp_get_default_device();
int DevInit = omp_get_initial_device();
//--------------------------------------------------
// Initialize data on the host and default device.
//--------------------------------------------------
// CHECK: host X = h
// CHECK-NEXT: device X = d
char X = 'd';
#pragma omp target enter data map(to:X)
X = 'h';
CHECK_DATA();
//--------------------------------------------------
// Check behavior when specifying host directly.
//--------------------------------------------------
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target device(DevInit) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target teams device(DevInit) num_teams(1) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// Check that __kmpc_push_target_tripcount_mapper doesn't fail. I'm not sure
// how to check that it actually pushes to the initial device.
#pragma omp target teams device(DevInit) num_teams(1)
#pragma omp distribute
for (int i = 0; i < 2; ++i)
;
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target data device(DevInit) map(always,tofrom:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target enter data device(DevInit) map(always,to:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target exit data device(DevInit) map(always,from:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update device(DevInit) to(X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update device(DevInit) from(X)
;
CHECK_DATA();
//--------------------------------------------------
// Check behavior when device defaults to host.
//--------------------------------------------------
// Same directive sequence as above, but relying on the default-device ICV.
omp_set_default_device(DevInit);
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target teams num_teams(1) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// Check that __kmpc_push_target_tripcount_mapper doesn't fail. I'm not sure
// how to check that it actually pushes to the initial device.
#pragma omp target teams num_teams(1)
#pragma omp distribute
for (int i = 0; i < 2; ++i)
;
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target data map(always,tofrom:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target enter data map(always,to:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target exit data map(always,from:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update to(X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update from(X)
;
CHECK_DATA();
return 0;
}
|
dgemm.c | #include "cblas.h"
#include "dgemm_versions.h"
#include <assert.h>
#include <omp.h>
#include <stdio.h>
// #ifndef MYLIB_NUM_THREADS
// #define MYLIB_NUM_THREADS 10
// #endif
#define BLOCK_SIZE 2
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
/* Blocked, OpenMP-parallel C = alpha*A^T*B + beta*C for the single case this
   build supports (column-major, A transposed, B not transposed; enforced by
   the asserts below).  C is first scaled by beta, then each BLOCK_SIZE x
   BLOCK_SIZE tile of C accumulates its row of A-blocks times column of
   B-blocks via the scalar kernel cblas_dgemmScalarjik (called with beta=1
   so partial products accumulate). */
void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
const int K, const double alpha, const double *A,
const int lda, const double *B, const int ldb,
const double beta, double *C, const int ldc)
{
assert(Order == CblasColMajor);
assert(TransA == CblasTrans);
assert(TransB == CblasNoTrans);
// multiply C by beta
#pragma omp parallel for collapse(2)
for (int col = 0; col < N; col++) {
for (int row = 0; row < M; row++) {
C[ldc * col + row] = beta * C[ldc * col + row];
}
}
/* Number of blocks along each dimension, rounding up for the ragged edge. */
int nbRowBlocks = M/BLOCK_SIZE + (M%BLOCK_SIZE != 0 ? 1 : 0);
int nbColBlocks = N/BLOCK_SIZE + (N%BLOCK_SIZE != 0 ? 1 : 0);
int nbKBlocks = K/BLOCK_SIZE + (K%BLOCK_SIZE != 0 ? 1 : 0);
/* collapse(2) requires the row/col loops to stay perfectly nested; the k
   loop is sequential within each (row,col) tile, so there is no data race
   on C even though several k-blocks write to the same tile. */
#pragma omp parallel for collapse(2)
for (int row = 0; row < nbRowBlocks; row++) {
for (int col = 0; col < nbColBlocks; col++) {
int rowStart = row * BLOCK_SIZE;
int rowEnd = min((row+1) * BLOCK_SIZE, M);
int colStart = col * BLOCK_SIZE;
int colEnd = min((col+1) * BLOCK_SIZE, N);
for (int k = 0; k < nbKBlocks; k++) {
int kStart = k * BLOCK_SIZE;
int kEnd = min((k+1) * BLOCK_SIZE, K);
/* A is transposed: element (k, row) lives at A[lda*row + k]. */
cblas_dgemmScalarjik(Order, TransA, TransB,
rowEnd-rowStart, colEnd-colStart, kEnd-kStart, alpha,
&A[lda*rowStart + kStart], lda,
&B[ldb*colStart + kStart], ldb,
1, &C[ldc*colStart + rowStart], ldc);
}
}
}
}
|
master.c | #include <stdio.h>
#include <cmath>
#include "../common.h"
#include "kernel_fin.c"
#include "kernel_ecc.h"
#include "kernel_cam.h"
/* One evaluation of the model's right-hand side: copy the current state to
   the device, run the ECC kernel (team 0) and the three CAM kernels (team 1)
   concurrently on the target, copy results back, run the CPU coupling kernel
   kernel_fin, replace NaN/Inf rates, and accumulate per-phase timings into
   timecopyin/timekernel/timecopyout. */
void
master( fp timeinst,
fp *initvalu,
fp *params,
fp *finavalu,
fp *com,
long long *timecopyin,
long long *timekernel,
long long *timecopyout)
{
//======================================================================================================================================================150
// VARIABLES
//======================================================================================================================================================150
//timer
long long time0;
long long time1;
long long time2;
long long time3;
// counters
int i;
// offset pointers
int initvalu_offset_ecc; // 46 points
int initvalu_offset_Dyad; // 15 points
int initvalu_offset_SL; // 15 points
int initvalu_offset_Cyt; // 15 poitns
// common variables
time0 = get_time();
//======================================================================================================================================================150
// COPY DATA TO GPU MEMORY
//======================================================================================================================================================150
//====================================================================================================100
// initvalu
//====================================================================================================100
#ifdef DEBUG
for (int i = 0; i < EQUATIONS; i++)
printf("initvalu %d %f\n", i, initvalu[i]);
for (int i = 0; i < PARAMETERS; i++)
printf("params %d %f\n", i, params[i]);
printf("\n");
#endif
#pragma omp target update to (initvalu[0:EQUATIONS])
#pragma omp target update to (params[0:PARAMETERS])
time1 = get_time();
// GPU: KERNEL
// Two teams: team 0 runs ECC, team 1 runs the three CAM modules.  Each
// team uses only its thread 0 since both kernels are sequential code.
#pragma omp target teams num_teams(2) thread_limit(NUMBER_THREADS)
{
#pragma omp parallel
{
int bx = omp_get_team_num();
int tx = omp_get_thread_num();
// pointers
int valu_offset; // inivalu and finavalu offset
int params_offset; // parameters offset
int com_offset; // kernel1-kernel2 communication offset
// module parameters
fp CaDyad; // from ECC model, *** Converting from [mM] to [uM] ***
fp CaSL; // from ECC model, *** Converting from [mM] to [uM] ***
fp CaCyt; // from ECC model, *** Converting from [mM] to [uM] ***
//====================================================================================================100
// ECC
//====================================================================================================100
// limit to useful threads
if(bx == 0){ // first processor runs ECC
if(tx == 0){ // only 1 thread runs it, since its a sequential code
// thread offset
valu_offset = 0; //
// ecc function
kernel_ecc( timeinst,
initvalu,
finavalu,
valu_offset,
params);
}
}
//====================================================================================================100
// CAM x 3
//====================================================================================================100
// limit to useful threads
else if(bx == 1){ // second processor runs CAMs (in parallel with ECC)
if(tx == 0){ // only 1 thread runs it, since its a sequential code
// specific
valu_offset = 46;
params_offset = 0;
com_offset = 0;
CaDyad = initvalu[35]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
// cam function for Dyad
kernel_cam( timeinst,
initvalu,
finavalu,
valu_offset,
params,
params_offset,
com,
com_offset,
CaDyad);
// specific
valu_offset = 61;
params_offset = 5;
com_offset = 1;
CaSL = initvalu[36]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
// cam function for SL
kernel_cam( timeinst,
initvalu,
finavalu,
valu_offset,
params,
params_offset,
com,
com_offset,
CaSL);
// specific
valu_offset = 76;
params_offset = 10;
com_offset = 2;
CaCyt = initvalu[37]*1e3; // from ECC model, *** Converting from [mM] to [uM] ***
// cam function for Cyt
kernel_cam( timeinst,
initvalu,
finavalu,
valu_offset,
params,
params_offset,
com,
com_offset,
CaCyt);
}
}
}
}
time2 = get_time();
#pragma omp target update from (finavalu[0:EQUATIONS])
#pragma omp target update from (com[0:3])
#ifdef DEBUG
for (int i = 0; i < EQUATIONS; i++)
printf("finavalu %d %f\n", i, finavalu[i]);
for (int i = 0; i < 3; i++)
printf("%f ", com[i]);
printf("\n");
#endif
time3 = get_time();
//======================================================================================================================================================150
// CPU: FINAL KERNEL
//======================================================================================================================================================150
// *copyin_time,
// *kernel_time,
// *copyout_time)
timecopyin[0] = timecopyin[0] + (time1-time0);
timekernel[0] = timekernel[0] + (time2-time1);
timecopyout[0] = timecopyout[0] + (time3-time2);
//======================================================================================================================================================150
// CPU: FINAL KERNEL
//======================================================================================================================================================150
initvalu_offset_ecc = 0; // 46 points
initvalu_offset_Dyad = 46; // 15 points
initvalu_offset_SL = 61; // 15 points
initvalu_offset_Cyt = 76; // 15 poitns
kernel_fin( initvalu,
initvalu_offset_ecc,
initvalu_offset_Dyad,
initvalu_offset_SL,
initvalu_offset_Cyt,
params,
finavalu,
com[0],
com[1],
com[2]);
//======================================================================================================================================================150
// COMPENSATION FOR NANs and INFs
//======================================================================================================================================================150
for(i=0; i<EQUATIONS; i++){
if (std::isnan(finavalu[i])){
finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
}
else if (std::isinf(finavalu[i])){
finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
}
}
}
|
hailstone.c | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <x86intrin.h>
#include "hailstone.h"
extern struct poly fpoly16[];
// command-line flags and their arguments (set by parse_options())
int bflag = 0;            // -b: start block given
uint32_t start_block = 0;
int cflag = 0;            // -c: checkpoint file name given
// fix: no trailing ';' in the macro -- the old definition expanded to
// `"checkpoint.txt";`, leaving a stray file-scope semicolon below (a
// constraint violation in ISO C) and breaking any expression use of it
#define CHECKPOINT_NAME "checkpoint.txt"
char *checkpoint_file = CHECKPOINT_NAME;
int dflag = 0;            // -d: daemon mode
int fflag = 0;            // -f: fast search
int lflag = 0;            // -l: log file given
FILE *logfile = NULL;
int nflag = 0;            // -n: number of blocks given
uint32_t num_blocks = 1;
int wflag = 0;            // -w: parallel width given
uint32_t wvalue = 1;
// usage text printed for -? or invalid options
char *usage[] = {
"hailstone [-b <n>][-c <file>][-f][-l <f>][-n <n>][-w <n>]",
"\t-b n, start at block n, where each block is 2^32 numbers",
"\t-c file, set name of checkpoint file (default checkpoint.txt)",
"\t-d, start in daemon mode",
"\t-n n, search n blocks, where each block is 2^32 numbers",
"\t-f , fast search - look for peak only and backtrack if found",
"\t-l f, use file l as a log file",
"\t-w n, search n blocks in parallel",
};
/* parse_options - read command line options and set flags */
/* parse_options - read command line options and set flags.
   Populates the file-scope flag globals (bflag/start_block, cflag/
   checkpoint_file, dflag, fflag, lflag/logfile, nflag/num_blocks,
   wflag/wvalue) via getopt.  On any usage error it prints the usage
   text and exits; otherwise returns optind (index of the first
   non-option argument). */
int parse_options(int argc,char **argv){
int opt;
int i;
while ((opt = getopt(argc,argv,"b:c:dfl:n:w:?")) != -1){
switch(opt){
case 'b':
bflag = 1;
sscanf(optarg,"%u",&start_block);
break;
case 'c':
cflag = 1;
checkpoint_file = strdup(optarg);
break;
case 'd':
dflag = 1;
break;
case 'f':
fflag = 1;
break;
case 'l':
lflag = 1;
if (!strcmp(optarg,"-")) logfile = stdout;
else if ((logfile = fopen(optarg,"a+")) == 0){
fprintf(stderr,"unable to open logfile: %s\n",optarg);
goto usage;
}
break;
case 'n':
nflag = 1;
sscanf(optarg,"%u",&num_blocks);
break;
case 'w':
wflag = 1;
sscanf(optarg,"%u",&wvalue);
break;
case '?':
default:
/* `goto usage` from the -l and -b/-f error paths jumps here; the
   label lives inside the switch but is an ordinary function-scope
   label, so the jumps are legal C. */
usage:
fprintf(stderr,"usage: ");
for (i=0;i<(sizeof(usage)/sizeof(usage[0]));i++){
fputs(usage[i],stderr);
fputc('\n',stderr);
}
exit(0);
break;
}
}
if (bflag == 1 && fflag == 1){
/* NOTE(review): this rejects combining -b with -f, but the message talks
   about "start block is 0" -- confirm which is intended. */
fprintf(stderr,"The -f flag can not be given when start block is 0\n");
goto usage;
}
if (lflag == 0) logfile = stdout;
return optind;
}
// conduct a fast search - return 1 if peaks are found, 0 otherwise
/* Conduct a fast search of one 2^32-number block: walk all residues i of the
   low CUTOFF_WIDTH bits, use the per-residue precomputed polynomial table
   (fpoly16 / cutoff16) to either rule out whole residue classes at once or
   iterate each candidate, and run the hailstone step counters looking for a
   new step-count peak or value maximum.
   Returns 1 if a peak is found (caller must then re-search the block with the
   slow, exact search), 0 otherwise. */
int fast_search(uint64_t blocknum){
unsigned int i,j;
uint32_t n[MAXDIGITS] = { 0 };
int32_t steps;
int32_t peak_found;
int clz_value;
struct poly *fp;
unsigned long int num;
for (i=0;i<(1<<CUTOFF_WIDTH);i++){
if (cutoff16[i] & 0x80) continue; // duplicate polynomial
if (cutoff16[i] & 0x40){
// no max in values
// see if we can cut off all the polynomials at once
// Apply the residue's polynomial (num >> div2) * mul3 + add to the
// largest candidate in the block; if even that cannot reach the
// current global step record, the whole residue class is skipped.
fp = &fpoly16[i&0xffff];
num = (blocknum<<32)|((1l<<32)-1);
num = num >> fp->div2;
num = num * fp->mul3;
num = num + fp->add;
clz_value = __lzcnt64(num);
if (clz64[clz_value] + fp->steps < global_maxsteps) continue;
// no cutoff so calculate each entry
for (j=0;j<(1<<(32-CUTOFF_WIDTH));j++){
num = (blocknum<<32)|(j<<CUTOFF_WIDTH)|i;
// ignore evens + 5 mod 6 + 2,4,8 mod 9
switch(num%18){
case 0: // even
case 2: // even, 2 mod 9
case 4: // even, 4 mod 9
case 5: // 5 mod 6
case 6: // even
case 8: // even, 8 mod 9
case 10: // even
case 11: // 2 mod 9, 5 mod 6
case 12: // even
case 13: // 4 mod 9
case 14: // even
case 16: // even
case 17: // 8 mod 9, 5 mod 6
continue;
}
// Fast-forward through the precomputed steps, then continue with the
// step-only (no value-max) hailstone routines.
num = num >> fp->div2;
num = num * fp->mul3 + fp->add;
n[0] = num & 0xffffffff;
n[1] = num >> 32;
steps = fp->steps;
peak_found = 0;
#if EXPERIMENTAL
hail64an(num,steps,global_maxsteps,global_maxvalue_size,global_maxvalue,&peak_found);
#else
hail64pnf(n,&steps,&global_maxsteps,global_maxvalue,global_maxvalue_size,&peak_found);
hail32pnf(n,&steps,&global_maxsteps,global_maxvalue,global_maxvalue_size,&peak_found);
#endif
if (peak_found){
// oops we found one, so exit for more complete search
if (!dflag)
fprintf(logfile,"peak found for %lu\n",(blocknum<<32)|(j<<CUTOFF_WIDTH)|i);
return 1;
}
}
} else {
// possible max in values
// Cannot fast-forward safely; start every surviving candidate from
// step 0 with the value-max-tracking hailstone routines.
for (j=0;j<(1<<(32-CUTOFF_WIDTH));j++){
num = (blocknum<<32)|(j<<CUTOFF_WIDTH)|i;
// ignore evens + 5 mod 6 + 2,4,8 mod 9
switch(num%18){
case 0: // even
case 2: // even, 2 mod 9
case 4: // even, 4 mod 9
case 5: // 5 mod 6
case 6: // even
case 8: // even, 8 mod 9
case 10: // even
case 11: // 2 mod 9, 5 mod 6
case 12: // even
case 13: // 4 mod 9
case 14: // even
case 16: // even
case 17: // 8 mod 9, 5 mod 6
continue;
}
steps = 0;
n[0] = (j<<CUTOFF_WIDTH)|i;
n[1] = blocknum;
peak_found = 0;
#if EXPERIMENTAL
hail64am(num,steps,global_maxsteps,global_maxvalue_size,global_maxvalue,&peak_found);
#else
hail64pmf(n,&steps,&global_maxsteps,global_maxvalue,global_maxvalue_size,&peak_found);
hail32pnf(n,&steps,&global_maxsteps,global_maxvalue,global_maxvalue_size,&peak_found);
#endif
if (peak_found){
if (!dflag)
fprintf(logfile,"peak found for %lu\n",(blocknum<<32)|(j<<CUTOFF_WIDTH)|i);
// oops we found one, so exit for more complete search
return 1;
}
}
}
}
return 0;
}
/* Entry point.  In daemon mode (-d) it serves HAIL_FASTSEARCH requests over
   UDP, running the requested blocks in parallel and replying with
   HAIL_PEAKFOUND / HAIL_NOPEAK.  Otherwise it resumes from the checkpoint
   file and searches num_blocks blocks, optionally fast-searching wvalue
   blocks in parallel (-w) and falling back to the slow exact search of a
   block whenever a peak or a prediction is hit. */
int main(int argc,char **argv){
uint64_t i,j;
time_t start_time,start_time2,now;
struct tm tmval;
char timebuf[120];
int search_parallel,found_peak;
int sockfd,n,len;
struct servent *service;
struct sockaddr_in server_addr,client_addr;
struct hail_packet packet;
parse_options(argc,argv);
if (dflag){
sockfd = socket(AF_INET,SOCK_DGRAM,0);
memset(&server_addr,0,sizeof(server_addr));
server_addr.sin_family = AF_INET;
server_addr.sin_addr.s_addr=htonl(INADDR_ANY);
if ((service = getservbyname("haild","udp")) == NULL){
fprintf(stderr,"can't find haild/udp in /etc/services\n");
exit(0);
}
/* NOTE(review): getservbyname() returns s_port already in network byte
   order; wrapping it in htons() double-converts on little-endian hosts.
   Confirm against the client side before changing. */
server_addr.sin_port = htons(service->s_port);
bind(sockfd,(struct sockaddr *) &server_addr,sizeof(server_addr));
while (1){
len = sizeof(client_addr);
/* NOTE(review): recvfrom() expects a socklen_t*; `len` is an int and
   the result `n` is never checked -- works on common ABIs, confirm. */
n = recvfrom(sockfd,&packet,sizeof(packet),0,
(struct sockaddr *) &client_addr,&len);
if (packet.message == HAIL_FASTSEARCH){
// adopt the requester's search window and current records
start_block = packet.start_block;
num_blocks = packet.num_blocks;
global_maxsteps = packet.maxsteps;
global_maxvalue_size = packet.maxvalue_size;
for (j=0;j<packet.maxvalue_size;j++){
global_maxvalue[j] = packet.maxvalue[j];
}
found_peak = 0;
#pragma omp parallel for shared(found_peak)
for (j=0;j<num_blocks;j++){
if (fast_search(start_block+j))
found_peak = 1;
}
packet.message = (found_peak)? HAIL_PEAKFOUND : HAIL_NOPEAK;
} else {
packet.message = HAIL_ERROR;
}
client_addr.sin_port = htons(service->s_port+1);
// send back a response
sendto(sockfd,&packet,
sizeof(packet),0,
(struct sockaddr *) &client_addr,sizeof(client_addr));
}
} else {
recover_checkpoint_file();
}
for (i=0;i<num_blocks;){
time(&start_time);
localtime_r(&start_time,&tmval);
strftime(timebuf,sizeof(timebuf),"%F %T",&tmval);
if (wflag && wvalue > 1){
// try fast search in parallel...
search_parallel = 1;
for (j=0;j<wvalue;j++){
if (check_prediction(start_block+i+j) != 0){
search_parallel = 0;
break;
}
}
if (search_parallel == 0){
// predictions, so search next block only
fprintf(logfile,"%s: Search of block: %lu\n",timebuf,start_block+i);
search_block(start_block+i);
remove_prediction(start_block+i);
i++;
} else {
// no predictions, so try search in parallel
fprintf(logfile,"%s: Fast parallel search of blocks: %lu-%lu\n",timebuf,start_block+i,start_block+i+wvalue-1);
found_peak = 0;
#pragma omp parallel for shared(found_peak)
for (j=0;j<wvalue;j++){
if (fast_search(start_block+i+j)){
found_peak = 1;
}
}
if (found_peak == 1){
// failure, one of the parallel searches found a peak
// re-search only the first block exactly; the rest will be revisited
time(&start_time2);
localtime_r(&start_time2,&tmval);
strftime(timebuf,sizeof(timebuf),"%F %T",&tmval);
fprintf(logfile,"%s: Peak found, parallel search of blocks: %lu-%lu, %ld seconds\n",
timebuf,start_block+i,start_block+i+wvalue-1,start_time2-start_time);
start_time = start_time2;
search_block(start_block+i);
i++;
} else {
// success! no peaks found in parallel search
i += wvalue;
}
}
} else if (check_prediction(start_block+i) == 0){
// no parallel search but try a fast search first
fprintf(logfile,"%s: Fast search of block: %lu\n",timebuf,start_block+i);
if (fast_search(start_block+i)){
time(&start_time2);
localtime_r(&start_time2,&tmval);
strftime(timebuf,sizeof(timebuf),"%F %T",&tmval);
fprintf(logfile,"%s: Peak found, search of block: %lu, %ld seconds\n",timebuf,start_block+i,start_time2-start_time);
start_time = start_time2;
search_block(start_block+i);
}
i++;
} else {
// prediction, so start with slow search
fprintf(logfile,"%s: Search of block: %lu\n",timebuf,start_block+i);
search_block(start_block+i);
remove_prediction(start_block+i);
i++;
}
time(&now);
localtime_r(&now,&tmval);
strftime(timebuf,sizeof(timebuf),"%F %T",&tmval);
fprintf(logfile,"%s: Search complete, %ld seconds\n",timebuf,now-start_time);
}
save_checkpoint_file(start_block+i);
return 0;
}
|
ztradd.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_tradd
*
* Performs an addition of two trapezoidal matrices similarly to the
* pztradd() function from the PBLAS library:
*
* \f[ B = \alpha * op( A ) + \beta * B, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^H, \f]
*
* alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
* n-by-m matrix depending on the value of transa and B an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the shape of op( A ) and B matrices:
* - PlasmaUpper: op( A ) and B are upper trapezoidal matrices.
* - PlasmaLower: op( A ) and B are lower trapezoidal matrices.
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^H
*
* @param[in] m
* Number of rows of the matrices op( A ) and B.
* m >= 0.
*
* @param[in] n
* Number of columns of the matrices op( A ) and B.
* n >= 0.
*
* @param[in] alpha
* Scalar factor of A.
*
* @param[in] A
* Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
* and m otherwise.
*
* @param[in] lda
* Leading dimension of the array A. lda >= max(1,l), where l is m
* when transa = PlasmaNoTrans and n otherwise.
*
* @param[in] beta
* Scalar factor of B.
*
* @param[in,out] B
* Matrix of size ldb-by-n.
* On exit, B = alpha * op( A ) + beta * B
*
* @param[in] ldb
* Leading dimension of the array B.
* ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_ztradd
* @sa plasma_ctradd
* @sa plasma_dtradd
* @sa plasma_stradd
*
******************************************************************************/
int plasma_ztradd(plasma_enum_t uplo, plasma_enum_t transa,
int m, int n,
plasma_complex64_t alpha, plasma_complex64_t *pA, int lda,
plasma_complex64_t beta, plasma_complex64_t *pB, int ldb)
{
// Get PLASMA context
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
// (negative return values encode the 1-based position of the bad argument)
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
return -2;
}
if (m < 0) {
plasma_error("illegal value of m");
return -3;
}
if (n < 0) {
plasma_error("illegal value of n");
return -4;
}
if (pA == NULL) {
plasma_error("NULL A");
return -6;
}
// Dimensions of A as stored: swapped when op( A ) is a transpose.
int am, an;
if (transa == PlasmaNoTrans) {
am = m;
an = n;
}
else {
am = n;
an = m;
}
int bm = m;
int bn = n;
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (pB == NULL) {
plasma_error("NULL B");
return -9;
}
if (ldb < imax(1, bm)) {
plasma_error("illegal value of ldb");
return -10;
}
// quick return
if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_tradd(plasma, PlasmaComplexDouble, m, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
int retval;
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
am, an, 0, 0, am, an, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
bm, bn, 0, 0, bm, bn, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
// NOTE(review): retval of the two init calls below is not checked -- confirm
// they cannot fail here, or propagate the error as for the creates above.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
// All tasks are spawned from the master thread; the implicit barrier at the
// end of the parallel region synchronizes before the descriptors are freed.
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);
// Call tile async function.
plasma_omp_ztradd(uplo, transa,
alpha, A,
beta, B,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
* @ingroup plasma_tradd
*
* Performs an addition of two trapezoidal matrices similarly to the
* pztradd() function from the PBLAS library. Non-blocking tile version of
* plasma_ztradd(). May return before the computation is finished. Operates
* on matrices stored by tiles. All matrices are passed through descriptors.
* All dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the shape of op( A ) and B matrices:
* - PlasmaUpper: op( A ) and B are upper trapezoidal matrices.
* - PlasmaLower: op( A ) and B are lower trapezoidal matrices.
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^H
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check the
* sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_ztradd
* @sa plasma_omp_ctradd
* @sa plasma_omp_dtradd
* @sa plasma_omp_stradd
*
******************************************************************************/
void plasma_omp_ztradd(plasma_enum_t uplo, plasma_enum_t transa,
plasma_complex64_t alpha, plasma_desc_t A,
plasma_complex64_t beta, plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
// (errors are reported through sequence/request, not return values)
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// NOTE(review): when sequence is NULL, plasma_request_fail is handed that
// same NULL pointer -- confirm it tolerates a NULL sequence argument.
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
// (number of rows of op( A ) is A.m, or A.n when A is transposed)
int am = transa == PlasmaNoTrans ? A.m : A.n;
if ((alpha == 0.0 || am == 0) && beta == 1.0)
return;
// Call parallel function.
plasma_pztradd(uplo, transa,
alpha, A,
beta, B,
sequence, request);
}
|
jacobi25_2d-p.pluto.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
/*
* N is the number of points
* T is the number of timesteps
*/
#ifdef HAS_DECLS
#include "decls.h"
#else
#define N 8000L
#define T 100000L
#endif
#define NUM_FP_OPS 10
/* Define our arrays */
double A[2][N][N];
double total=0; double sum_err_sqr=0;
int chtotal=0;
/* Compute *result = *x - *y as a normalized struct timeval.
 * Returns 1 when the difference is negative (x earlier than y), else 0.
 * NOTE: *y is used as scratch space and is modified by the call. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) {
  /* Borrow seconds into y when x has fewer microseconds than y,
   * so the microsecond subtraction below cannot go negative. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry whole seconds out of the microsecond gap when it exceeds 1e6. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the component-wise difference is well formed. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for a PLUTO/CLooG-generated 2D 25-point Jacobi-style stencil on an
 * N x N grid with wrap-around (toroidal) boundaries.  Two time levels are
 * kept in A[0] and A[1]; each outer t1 iteration performs two sweeps
 * (A[0] -> A[1], then A[1] -> A[0]), which is why T is temporarily halved
 * to 50000 below (2 * 50000 == the nominal 100000 timesteps).
 * NOTE(review): the generated boundary handling encodes the wrap-around via
 * long chains of ?: index expressions (see the commented patterns below);
 * do not hand-edit those lines. */
int main(int argc, char * argv[]) {
  long int t, i, j, k;
  const int BASE = 1024;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0;
  printf("Number of points = %ld\t|Number of timesteps = %ld\t", N*N, T);
  /* Initialization: fill time level 0 with a reproducible pseudo-random field. */
  srand(42); // seed with a constant value to verify results
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      A[0][i][j] = 1.0 * (rand() % BASE);
    }
  }
#ifdef TIME
  gettimeofday(&start, 0);
#endif
  /* Index-wrapping patterns used by the generated code below: */
  // (i-2)<(0)?(N+(i-2)):(i-2)
  // (i==0)?(N-1):(i-1)
  // (i==N-1)?(0):(i+1)
  // (i+2)>(N-1)?(i+3-N)?(i+2)
  // (j-2)<(0)?(N+(j-2)):(j-2)
  // (j==0)?(N-1):(j-1)
  // (j==N-1)?(0):(j+1)
  // (j+2)>(N-1)?(j+3-N)?(j+2)
  // #undef N
  // #define N 8000L
  /* Each t1 iteration below does two grid sweeps, so halve T here and
   * restore it after the compute section (see the #define after the loop). */
#undef T
#define T 50000
  /* Copyright (C) 1991-2012 Free Software Foundation, Inc.
     This file is part of the GNU C Library.
     The GNU C Library is free software; you can redistribute it and/or
     modify it under the terms of the GNU Lesser General Public
     License as published by the Free Software Foundation; either
     version 2.1 of the License, or (at your option) any later version.
     The GNU C Library is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     Lesser General Public License for more details.
     You should have received a copy of the GNU Lesser General Public
     License along with the GNU C Library; if not, see
     <http://www.gnu.org/licenses/>.  */
  /* This header is separate from features.h so that the compiler can
     include it implicitly at the start of every compilation.  It must
     not itself include <features.h> or any other header that includes
     <features.h> because the implicit include comes before any feature
     test macros that may be defined in a source file before it first
     explicitly includes a system header.  GCC knows the name of this
     header in order to preinclude it.  */
  /* We do support the IEC 559 math functionality, real and complex.  */
  /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
     Unicode 6.0.  */
  /* We do not support C11 <threads.h>.  */
  /* CLooG-generated loop nest: t3 parallelizes pairs of rows, t4 tiles the
   * columns in chunks of 256, t6 is the innermost vectorizable loop. */
  int t1, t2, t3, t4, t5, t6;
  int lb, ub, lbp, ubp, lb2, ub2;
  register int lbv, ubv;
/* Start of CLooG code */
if ((N >= 1) && (T >= 1)) {
  for (t1=0;t1<=T-1;t1++) {
    /* First sweep of the pair: read A[0], write A[1], full domain with
     * wrap-around boundaries. */
    lbp=0;
    ubp=floord(N-1,2);
#pragma omp parallel for private(lbv,ubv,t4,t5,t6)
    for (t3=lbp;t3<=ubp;t3++) {
      for (t4=0;t4<=floord(N-1,256);t4++) {
        for (t5=2*t3;t5<=min(N-1,2*t3+1);t5++) {
          lbv=256*t4;
          ubv=min(N-1,256*t4+255);
#pragma ivdep
#pragma vector always
          for (t6=lbv;t6<=ubv;t6++) {
            A[1][t5][t6] = 0.04*( ((t5-2)<(0)?( (t6-2)<(0)?A[0][N+(t5-2)][N+(t6-2)]:A[0][N+(t5-2)][t6-2] +(t6==0)?A[0][N+(t5-2)][N-1]:A[0][N+(t5-2)][t6-1] +A[0][N+(t5-2)][t6] +(t6==N-1)?A[0][N+(t5-2)][0]:A[0][N+(t5-2)][t6+1] +(t6+2)>(N-1)?A[0][N+(t5-2)][t6+3-N]:A[0][N+(t5-2)][t6+2] ):( (t6-2)<(0)?A[0][t5-2][N+(t6-2)]:A[0][t5-2][t6-2] +(t6==0)?A[0][t5-2][N-1]:A[0][t5-2][t6-1] +A[0][t5-2][t6] +(t6==N-1)?A[0][t5-2][0]:A[0][t5-2][t6+1] +(t6+2)>(N-1)?A[0][t5-2][t6+3-N]:A[0][t5-2][t6+2])) +((t5==0)?( (t6-2)<(0)?A[0][(N-1)][(N+(t6-2))]:A[0][(N-1)][(t6-2)] +(t6==0)?A[0][(N-1)][(N-1)]:A[0][(N-1)][(t6-1)] +A[0][(N-1)][t6] +(t6==N-1)?A[0][(N-1)][(0)]:A[0][(N-1)][(t6+1)] +(t6+2)>(N-1)?A[0][(N-1)][(t6+3-N)]:A[0][(N-1)][(t6+2)] ):( (t6-2)<(0)?A[0][(t5-1)][(N+(t6-2))]:A[0][(t5-1)][(t6-2)] +(t6==0)?A[0][(t5-1)][(N-1)]:A[0][(t5-1)][(t6-1)] +A[0][(t5-1)][t6] +(t6==N-1)?A[0][(t5-1)][(0)]:A[0][(t5-1)][(t6+1)] +(t6+2)>(N-1)?A[0][(t5-1)][(t6+3-N)]:A[0][(t5-1)][(t6+2)])) +( (t6-2)<(0)?A[0][t5][(N+(t6-2))]:A[0][t5][(t6-2)] +(t6==0)?A[0][t5][(N-1)]:A[0][t5][(t6-1)] +A[0][t5][t6] +(t6==N-1)?A[0][t5][(0)]:A[0][t5][(t6+1)] +(t6+2)>(N-1)?A[0][t5][(t6+3-N)]:A[0][t5][(t6+2)]) +((t5==N-1)?( (t6-2)<(0)?A[0][(0)][(N+(t6-2))]:A[0][(0)][(t6-2)] +(t6==0)?A[0][(0)][(N-1)]:A[0][(0)][(t6-1)] +A[0][(0)][t6] +(t6==N-1)?A[0][(0)][(0)]:A[0][(0)][(t6+1)] +(t6+2)>(N-1)?A[0][(0)][(t6+3-N)]:A[0][(0)][(t6+2)] ):( (t6-2)<(0)?A[0][(t5+1)][(N+(t6-2))]:A[0][(t5+1)][(t6-2)] +(t6==0)?A[0][(t5+1)][(N-1)]:A[0][(t5+1)][(t6-1)] +A[0][(t5+1)][t6] +(t6==N-1)?A[0][(t5+1)][(0)]:A[0][(t5+1)][(t6+1)] +(t6+2)>(N-1)?A[0][(t5+1)][(t6+3-N)]:A[0][(t5+1)][(t6+2)])) +((t5+2)>(N-1)?( (t6-2)<(0)?A[0][(t5+3-N)][(N+(t6-2))]:A[0][(t5+3-N)][(t6-2)] +(t6==0)?A[0][(t5+3-N)][(N-1)]:A[0][(t5+3-N)][(N-1)] +A[0][(t5+3-N)][t6] +(t6==N-1)?A[0][(t5+3-N)][(0)]:A[0][(t5+3-N)][(0)] +(t6+2)>(N-1)?A[0][(t5+3-N)][(t6+3-N)]:A[0][(t5+3-N)][(t6+3-N)] ):( +(t6-2)<(0)?A[0][(t5+2)][(N+(t6-2))]:A[0][(t5+2)][(t6-2)] +(t6==0)?A[0][(t5+2)][(N-1)]:A[0][(t5+2)][(t6-1)] 
+A[0][(t5+2)][t6] +(t6==N-1)?A[0][(t5+2)][(0)]:A[0][(t5+2)][(t6+1)] +(t6+2)>(N-1)?A[0][(t5+2)][(t6+3-N)]:A[0][(t5+2)][(t6+2)])) );;
          }
        }
      }
    }
    /* Second sweep of the pair: read A[1], write A[0].  NOTE(review): the
     * bounds here are narrower (rows 2..N-3, cols 2..N-3), i.e. only the
     * interior is updated on this half-step — confirm against the original
     * PLUTO input whether this asymmetry is intended. */
    lbp=1;
    ubp=floord(N-3,2);
#pragma omp parallel for private(lbv,ubv,t4,t5,t6)
    for (t3=lbp;t3<=ubp;t3++) {
      for (t4=0;t4<=floord(N-3,256);t4++) {
        for (t5=2*t3;t5<=min(N-3,2*t3+1);t5++) {
          lbv=max(2,256*t4);
          ubv=min(N-3,256*t4+255);
#pragma ivdep
#pragma vector always
          for (t6=lbv;t6<=ubv;t6++) {
            A[0][t5][t6] = 0.04*( ((t5-2)<(0)?( (t6-2)<(0)?A[1][N+(t5-2)][N+(t6-2)]:A[1][N+(t5-2)][t6-2] +(t6==0)?A[1][N+(t5-2)][N-1]:A[1][N+(t5-2)][t6-1] +A[1][N+(t5-2)][t6] +(t6==N-1)?A[1][N+(t5-2)][0]:A[1][N+(t5-2)][t6+1] +(t6+2)>(N-1)?A[1][N+(t5-2)][t6+3-N]:A[1][N+(t5-2)][t6+2] ):( (t6-2)<(0)?A[1][t5-2][N+(t6-2)]:A[1][t5-2][t6-2] +(t6==0)?A[1][t5-2][N-1]:A[1][t5-2][t6-1] +A[1][t5-2][t6] +(t6==N-1)?A[1][t5-2][0]:A[1][t5-2][t6+1] +(t6+2)>(N-1)?A[1][t5-2][t6+3-N]:A[1][t5-2][t6+2])) +((t5==0)?( (t6-2)<(0)?A[1][(N-1)][(N+(t6-2))]:A[1][(N-1)][(t6-2)] +(t6==0)?A[1][(N-1)][(N-1)]:A[1][(N-1)][(t6-1)] +A[1][(N-1)][t6] +(t6==N-1)?A[1][(N-1)][(0)]:A[1][(N-1)][(t6+1)] +(t6+2)>(N-1)?A[1][(N-1)][(t6+3-N)]:A[1][(N-1)][(t6+2)] ):( (t6-2)<(0)?A[1][(t5-1)][(N+(t6-2))]:A[1][(t5-1)][(t6-2)] +(t6==0)?A[1][(t5-1)][(N-1)]:A[1][(t5-1)][(t6-1)] +A[1][(t5-1)][t6] +(t6==N-1)?A[1][(t5-1)][(0)]:A[1][(t5-1)][(t6+1)] +(t6+2)>(N-1)?A[1][(t5-1)][(t6+3-N)]:A[1][(t5-1)][(t6+2)])) +( (t6-2)<(0)?A[1][t5][(N+(t6-2))]:A[1][t5][(t6-2)] +(t6==0)?A[1][t5][(N-1)]:A[1][t5][(t6-1)] +A[1][t5][t6] +(t6==N-1)?A[1][t5][(0)]:A[1][t5][(t6+1)] +(t6+2)>(N-1)?A[1][t5][(t6+3-N)]:A[1][t5][(t6+2)]) +((t5==N-1)?( (t6-2)<(0)?A[1][(0)][(N+(t6-2))]:A[1][(0)][(t6-2)] +(t6==0)?A[1][(0)][(N-1)]:A[1][(0)][(t6-1)] +A[1][(0)][t6] +(t6==N-1)?A[1][(0)][(0)]:A[1][(0)][(t6+1)] +(t6+2)>(N-1)?A[1][(0)][(t6+3-N)]:A[1][(0)][(t6+2)] ):( (t6-2)<(0)?A[1][(t5+1)][(N+(t6-2))]:A[1][(t5+1)][(t6-2)] +(t6==0)?A[1][(t5+1)][(N-1)]:A[1][(t5+1)][(t6-1)] +A[1][(t5+1)][t6] +(t6==N-1)?A[1][(t5+1)][(0)]:A[1][(t5+1)][(t6+1)] +(t6+2)>(N-1)?A[1][(t5+1)][(t6+3-N)]:A[1][(t5+1)][(t6+2)])) +((t5+2)>(N-1)?( (t6-2)<(0)?A[1][(t5+3-N)][(N+(t6-2))]:A[1][(t5+3-N)][(t6-2)] +(t6==0)?A[1][(t5+3-N)][(N-1)]:A[1][(t5+3-N)][(N-1)] +A[1][(t5+3-N)][t6] +(t6==N-1)?A[1][(t5+3-N)][(0)]:A[1][(t5+3-N)][(0)] +(t6+2)>(N-1)?A[1][(t5+3-N)][(t6+3-N)]:A[1][(t5+3-N)][(t6+3-N)] ):( +(t6-2)<(0)?A[1][(t5+2)][(N+(t6-2))]:A[1][(t5+2)][(t6-2)] +(t6==0)?A[1][(t5+2)][(N-1)]:A[1][(t5+2)][(t6-1)] 
+A[1][(t5+2)][t6] +(t6==N-1)?A[1][(t5+2)][(0)]:A[1][(t5+2)][(t6+1)] +(t6+2)>(N-1)?A[1][(t5+2)][(t6+3-N)]:A[1][(t5+2)][(t6+2)])) );;
          }
        }
      }
    }
  }
}
/* End of CLooG code */
  /* Restore the nominal timestep count for the reporting code below. */
#undef T
#define T 100000
  // #undef N
  // #define N 16000L
#ifdef TIME
  gettimeofday(&end, 0);
  ts_return = timeval_subtract(&result, &end, &start);
  tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);
  printf("|Time taken = %7.5lfs\n", tdiff );
  printf("|MFLOPS = %f\n", ((((double)NUM_FP_OPS * N *N * T) / tdiff) / 1000000L));
#endif
#ifdef VERIFY
  /* Checksums over the final time level A[T%2] for result verification. */
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      total+= A[T%2][i][j] ;
    }
  }
  printf("|sum: %e\t", total);
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      sum_err_sqr += (A[T%2][i][j] - (total/N))*(A[T%2][i][j] - (total/N));
    }
  }
  printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr));
  /* Byte-level checksum of the raw representation of the final grid. */
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      chtotal += ((char *)A[T%2][i])[j];
    }
  }
  printf("|sum(rep(A)) = %d\n", chtotal);
#endif
  return 0;
}
// icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm
// /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/
// /* @ begin PrimeRegTile (scalar_replacement=0; T1t3=8; T1t4=8; ) @*/
// /* @ end @*/
|
GB_binop__iseq_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int32)
// A*D function (colscale): GB (_AxD__iseq_int32)
// D*A function (rowscale): GB (_DxB__iseq_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int32)
// C=scalar+B GB (_bind1st__iseq_int32)
// C=scalar+B' GB (_bind1st_tran__iseq_int32)
// C=A+scalar GB (_bind2nd__iseq_int32)
// C=A'+scalar GB (_bind2nd_tran__iseq_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT32 || GxB_NO_ISEQ_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop body comes from the
// included template, specialized by the GB_* macros defined above
// (here: cij = (aij == bij) over int32_t).
void GB (_Cdense_ewise3_noaccum__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// iseq operator as the accumulator.  B_ek_slicing describes how B's entries
// are partitioned across B_ntasks tasks on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator was compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of the dense
// matrix C, using the iseq operator as the accumulator.
GrB_Info GB (_Cdense_accumb__iseq_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: generated boilerplate kept for templates that fall through
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j),
// where "scaling" applies the iseq operator entrywise.
GrB_Info GB (_AxD__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i),
// where "scaling" applies the iseq operator entrywise.
GrB_Info GB (_DxB__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, where "+" is the iseq
// operator.  When is_eWiseUnion is true, entries present in only one of
// A or B are paired with the alpha/beta scalars instead of being copied.
GrB_Info GB (_AaddB__iseq_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces, freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // only dereferenced for eWiseUnion; may be NULL otherwise
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B,
// where C is sparse or hypersparse and ".*" is the iseq operator.
GrB_Info GB (_AemultB_08__iseq_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hypersparse and B is
// bitmap/full.  flipxy requests the arguments be swapped; for iseq (which is
// commutative, GB_BINOP_FLIP == 0) the non-flipped template is always used.
GrB_Info GB (_AemultB_02__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B when M is sparse/hypersparse and both
// A and B are bitmap/full, with ".*" being the iseq operator.
GrB_Info GB (_AemultB_04__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap,
// with ".*" being the iseq operator.
GrB_Info GB (_AemultB_bitmap__iseq_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the iseq operator with the scalar bound to the
// first argument, i.e. Cx [p] = (x == Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__iseq_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only positions present in B's bitmap are computed
        if (GBB (Bb, p))
        {
            int32_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x == bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the iseq operator with the scalar bound to the
// second argument, i.e. Cx [p] = (Ax [p] == y) for every entry present in A.
GrB_Info GB (_bind2nd__iseq_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions present in A's bitmap are computed
        if (GBB (Ab, p))
        {
            int32_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply the iseq operator with the scalar
// bound to the first argument (cij = (x == aij), via GB_CAST_OP above).
GrB_Info GB (_bind1st_tran__iseq_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply the iseq operator with the scalar
// bound to the second argument (cij = (aij == y), via GB_CAST_OP above).
GrB_Info GB (_bind2nd_tran__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT.  Returns 1 if the difference is negative, otherwise 0.
 * Y is used as scratch and is normalized (modified) in the process. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Step 1: make y's microsecond field no larger than x's by borrowing
     * whole seconds into it. */
    if (x->tv_usec < y->tv_usec)
    {
        int sec_borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * sec_borrow;
        y->tv_sec += sec_borrow;
    }
    /* Step 2: if the microsecond gap exceeds a full second, move the
     * excess into the seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int sec_carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * sec_carry;
        y->tv_sec -= sec_carry;
    }
    /* Step 3: the difference is now representable field by field;
     * tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x's (normalized) seconds are smaller. */
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-2, 3D 25-point stencil.
 * Usage: prog [nx ny nz [nt]]; each dimension gets 2*4 ghost layers added.
 * Fixes vs. the original:
 *   - Nx/Ny/Nz/Nt now have defaults, so they are never read uninitialized
 *     when fewer arguments are given (was UB).
 *   - roc2 was malloc'd once and immediately overwritten by a second
 *     malloc, leaking the first allocation; only one allocation remains.
 *   - The initialization loops now start at 0 (the stencil reads index
 *     i-4 == 0 when i == 4, so plane/row/column 0 was read uninitialized),
 *     and A[1] is initialized (it is read at t == 0 before being written).
 *   - tile_size is freed before exit. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Interior size + 8 ghost points per dimension; defaults keep the run
   * well-defined when arguments are omitted. */
  int Nx = 32 + 8, Ny = 32 + 8, Nz = 32 + 8;
  int Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Two time levels of the field A, plus the coefficient field roc2. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 4;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Initialize every point (including index 0 in each dimension) with a
   * reproducible pseudo-random field; A[1] gets a defined starting value
   * because the update reads the previous time level at t == 0. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;   // previous timestep: read before first write
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* Symmetric 8th-order finite-difference coefficients. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  /* Run the kernel TESTS times and keep the best wall-clock time. */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            /* Leapfrog update: the new level overwrites the old one. */
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i ][j ][k ] +
                coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
                       A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
                       A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
                coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
                       A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
                       A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
                coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
                       A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
                       A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
                coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
                       A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
                       A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free allocated arrays (innermost first). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  return 0;
}
|
if-clause.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Sum the array a[0..n-1] in parallel: each thread accumulates a private
 * partial sum over its chunk of the iteration space and then merges it
 * atomically into the shared total.  The region only becomes parallel
 * when n > 4 (if-clause); the thread count comes from the command line. */
int main(int argc, char **argv)
{
  int k, n = 20, thread_id, nthreads, partial;
  int a[20], suma = 0;
  /* Both the iteration count and the thread count are required. */
  if (argc < 3) {
    fprintf(stderr,"Uso: \n %s <num-iteraciones> <num-hebras>\n", argv[0]);
    exit(-1);
  }
  n = atoi(argv[1]);
  if (n > 20)
    n = 20;                 /* a[] holds at most 20 elements */
  nthreads = atoi(argv[2]);
  if (nthreads < 1)
    nthreads = 1;           /* num_threads() needs a positive value */
  for (k = 0; k < n; k++)
    a[k] = k;
  #pragma omp parallel if(n>4) default(none) private(partial,thread_id) \
      shared(a,suma,n,nthreads) num_threads(nthreads)
  {
    partial = 0;
    thread_id = omp_get_thread_num();
    /* nowait: threads proceed to the atomic merge without an implied
     * barrier at the end of the worksharing loop. */
    #pragma omp for private(k) schedule(static) nowait
    for (k = 0; k < n; k++) {
      partial += a[k];
      printf(" thread %d suma de a[%d]=%d sumalocal=%d \n",thread_id,k,a[k],partial);
    }
    #pragma omp atomic
    suma += partial;
    /* All partial sums must be merged before the master prints the total. */
    #pragma omp barrier
    #pragma omp master
    printf("thread master=%d imprime suma=%d\n",thread_id,suma);
  }
  return 0;
}
|
GB_binop__bxor_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__bxor_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__bxor_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int32)
// C=scalar+B GB (_bind1st__bxor_int32)
// C=scalar+B' GB (_bind1st_tran__bxor_int32)
// C=A+scalar GB (_bind2nd__bxor_int32)
// C=A'+scalar GB (_bind2nd_tran__bxor_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_INT32 || GxB_NO_BXOR_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop body comes from the
// included template, specialized by the GB_* macros defined above
// (here: cij = aij ^ bij over int32_t).
void GB (_Cdense_ewise3_noaccum__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// bitwise-xor operator as the accumulator.
GrB_Info GB (_Cdense_accumB__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator was compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b with the BXOR_INT32 accumulator: the scalar b (passed as an
// untyped pointer p_bwork) is accumulated into every entry of the dense
// matrix C.
GrB_Info GB (_Cdense_accumb__bxor_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the braced block above always returns first
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled kernel: the C = A*D column-scale variant is not generated for
// this operator/type pair.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled kernel: the C = D*B row-scale variant is not generated for
// this operator/type pair.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the BXOR_INT32 op.
// The task decomposition (TaskList, C_ntasks, C_nthreads) and the C_to_*
// vector maps are computed by the caller; the numeric work is done by the
// shared GB_add_template.
GrB_Info GB (_AaddB__bxor_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
// for eWiseUnion, unpack the untyped alpha/beta scalars as int32_t
// for use inside the template
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B with the
// BXOR_INT32 op, where C is sparse or hypersparse.  The numeric work is
// done by the shared GB_emult_08 meta-template.
GrB_Info GB (_AemultB_08__bxor_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B with the BXOR_INT32 op, where A is
// sparse/hyper and B is bitmap/full.  GB_BINOP_FLIP is 0 for this
// operator (defined above), so the preprocessor selects the unflipped
// template branch below and the flipxy argument is not consulted.
GrB_Info GB (_AemultB_02__bxor_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with the BXOR_INT32 op, where M is
// sparse/hyper (pre-sliced into M_ek_slicing tasks) and both A and B are
// bitmap/full.
GrB_Info GB (_AemultB_04__bxor_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B with the BXOR_INT32
// op, where the result C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__bxor_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x ^ Bx [p] for every entry present in B: apply the BXOR_INT32
// operator with the scalar x bound to the first argument.  Bb is B's
// bitmap (NULL means all bnz entries are present, per GBB).
GrB_Info GB (_bind1st__bxor_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input/output arrays
    int32_t *Cz = (int32_t *) Cx_output ;
    int32_t *Bvals = (int32_t *) Bx_input ;
    int32_t xval = (*((int32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only touch positions where B has an entry
        if (GBB (Bb, k))
        {
            int32_t bkj = GBX (Bvals, k, false) ;
            Cz [k] = (xval) ^ (bkj) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] ^ y for every entry present in A: apply the BXOR_INT32
// operator with the scalar y bound to the second argument.  Ab is A's
// bitmap (NULL means all anz entries are present, per GBB).
GrB_Info GB (_bind2nd__bxor_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input/output arrays
    int32_t *Cz = (int32_t *) Cx_output ;
    int32_t *Avals = (int32_t *) Ax_input ;
    int32_t yval = (*((int32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch positions where A has an entry
        if (GBB (Ab, k))
        {
            int32_t akj = GBX (Avals, k, false) ;
            Cz [k] = (akj) ^ (yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) ^ (aij) ; \
}
// C = op (x, A'): transpose A and apply BXOR with x bound first, using
// the GB_CAST_OP macro just defined inside the transpose template.
GrB_Info GB (_bind1st_tran__bxor_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for any code following this kernel
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) ^ (y) ; \
}
// C = op (A', y): transpose A and apply BXOR with y bound second, using
// the GB_CAST_OP macro just defined inside the transpose template.
GrB_Info GB (_bind2nd_tran__bxor_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_transpose.c | //------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLS: GB_builder
// TODO:: this is way too complicated. Reduce it to two cases:
// C = A' both C and A are passed in, not Chandle
// C = C' C is transposed in-place, (C==A aliased)
// In both cases, require the header for C and A to be allocated already
// (either static or dynamic). A is never modified, unless C==A.
// C and A cannot be NULL on input. If C==A then C contains a valid
// matrix on input (the input matrix A), otherwise GB_phbix_free(C) is
// immediately done. Either header may be static or dynamic.
// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values. The transpose may be done in-place, in which case C or
// A are modified in-place.
// The input matrix may have shallow components (even if in-place), and the
// output may also have shallow components (even if the input matrix is not
// shallow).
// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.
// There are 3 ways to use this function:
// Method1 (in_place_C): If A_in is NULL, then C = (*Chandle) is transposed
// in-place. If out of memory, (*Chandle) is always returned as NULL, which
// frees the input matrix C if the transpose is done in-place.
// Method2 (in_place_A): If A_in is not NULL and Chandle is NULL (or points
// to A_in itself), then A is modified in-place, and the A_in matrix is not
// freed when done.
// Method3 (C=A'): Otherwise, A_in and Chandle are non-NULL, and C=A' is
// computed. If (*Chandle) is NULL on input, then a new header is allocated.
// If (*Chandle) is non-NULL on input, it is assumed to be an uninitialized
// static header, not obtained from malloc. On output, C->static_header will
// be true.
// The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used. The GB_builder method is more
// scalable, but not as fast with a modest number of threads.
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"
// GB_FREE_ALL: no extra workspace to free in GB_transpose's error paths
#define GB_FREE_ALL ;
// free prior content of A, if transpose is done in-place
// (shallow components are never freed; they are owned elsewhere)
#define GB_FREE_IN_PLACE_A \
{ \
if (in_place) \
{ \
/* A is being transposed in-place */ \
/* free prior content of A but not &A itself */ \
if (!Ap_shallow) GB_FREE (&Ap, Ap_size) ; \
if (!Ah_shallow) GB_FREE (&Ah, Ah_size) ; \
if (!Ab_shallow) GB_FREE (&Ab, Ab_size) ; \
if (!Ai_shallow) GB_FREE (&Ai, Ai_size) ; \
if (!Ax_shallow) GB_FREE (&Ax, Ax_size) ; \
} \
else \
{ \
/* A is not modified; it is purely an input matrix */ \
; \
} \
}
// free the new C matrix, unless C=A' is being done in-place of A
#define GB_FREE_C \
{ \
if (!in_place_A) \
{ \
/* free all of C and all its contents &C */ \
GB_Matrix_free (Chandle) ; \
} \
}
//------------------------------------------------------------------------------
// GB_transpose
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_transpose // C=A', C=(ctype)A or C=op(A')
(
GrB_Matrix *Chandle, // output matrix C, possibly modified in-place
GrB_Type ctype, // desired type of C; if NULL use A->type.
// ignored if op is present (cast to op->ztype)
const bool C_is_csc, // desired CSR/CSC format of C
const GrB_Matrix A_in, // input matrix
// no operator is applied if both op1 and op2 are NULL
const GrB_UnaryOp op1_in, // unary operator to apply
const GrB_BinaryOp op2_in, // binary operator to apply
const GxB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,A) else binop(A,y)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs and determine if transpose is done in-place
//--------------------------------------------------------------------------
GrB_Info info ;
GBURBLE ("(transpose) ") ;
GrB_Matrix A, C ;
bool in_place_C, in_place_A ;
bool C_static_header = false ;
struct GB_Matrix_opaque T_header ;
GrB_Matrix T = GB_clear_static_header (&T_header) ;
if (A_in == NULL)
{
//----------------------------------------------------------------------
// Method1 (in_place_C): C = C' ; &C is transposed in-place
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, NULL, op) ;
// C=A' is transposed in-place, in the matrix C.
// The matrix C is freed if an error occurs and C is set to NULL.
ASSERT (Chandle != NULL) ; // at least &C or A must be non-NULL
ASSERT (*Chandle != NULL) ;
A = (*Chandle) ;
C = A ; // C must be freed if an error occurs
in_place_C = true ; // C is modified in-place
in_place_A = false ;
ASSERT (A == C && C == (*Chandle)) ;
C_static_header = C->static_header ;
}
else if (Chandle == NULL || (*Chandle) == A_in)
{
//----------------------------------------------------------------------
// Method2 (in_place_A): A = A' ; A transposed in-place
//----------------------------------------------------------------------
// GB_transpose (NULL, ctype, csc, A, op) ;
// GB_transpose (&A, ctype, csc, A, op) ;
// C=A' is transposed in-place, in the matrix A.
// The matrix A_in is not freed if an error occurs.
A = A_in ;
Chandle = &A ; // C must not be freed if an error occurs
C = A ;
in_place_C = false ;
in_place_A = true ; // A is modified in-place
ASSERT (A == C && C == (*Chandle)) ;
C_static_header = A->static_header ;
}
else
{
//----------------------------------------------------------------------
// Method3 (C=A'): C and A are different; C may be a static header
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, A, op) ;
// &C and A are both non-NULL, and not aliased.
// C=A' where C is a new matrix constructed here.
// The matrix C is freed if an error occurs, and C is set to NULL
// unless C has a static header.
// If C is NULL, a new header is allocated and returned as (*Chandle).
// Otherwise, C = (*Chandle) is assumed to be an uninitialized static
// header, and (*Chandle) is not modified.
A = A_in ;
C = (*Chandle) ; // NULL, or pre-existing static header
in_place_C = false ; // C and A are different matrices
in_place_A = false ;
ASSERT (A != C && C == (*Chandle)) ;
C_static_header = (C != NULL) ;
if (C_static_header)
{
GB_clear_static_header (C) ;
}
}
bool in_place = (in_place_A || in_place_C) ;
ASSERT_MATRIX_OK (A, "A input for GB_transpose", GB0) ;
ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ;
ASSERT_UNARYOP_OK_OR_NULL (op1_in, "unop for GB_transpose", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (op2_in, "binop for GB_transpose", GB0) ;
ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_transpose", GB0) ;
ASSERT (C == (*Chandle)) ; // both may be NULL
// get the current sparsity control of A
float A_hyper_switch = A->hyper_switch ;
float A_bitmap_switch = A->bitmap_switch ;
int A_sparsity = A->sparsity ;
// wait if A has pending tuples or zombies, but leave it jumbled
GB_MATRIX_WAIT_IF_PENDING_OR_ZOMBIES (A) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
GrB_Type atype = A->type ;
size_t asize = atype->size ;
GB_Type_code acode = atype->code ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t anzmax = A->nzmax ;
// if in-place, these must be freed when done, whether successful or not
int64_t *restrict Ap = A->p ; size_t Ap_size = A->p_size ;
int64_t *restrict Ah = A->h ; size_t Ah_size = A->h_size ;
int64_t *restrict Ai = A->i ; size_t Ab_size = A->b_size ;
int8_t *restrict Ab = A->b ; size_t Ai_size = A->i_size ;
GB_void *restrict Ax = (GB_void *) A->x ; size_t Ax_size = A->x_size ;
bool A_is_bitmap = GB_IS_BITMAP (A) ;
bool A_is_packed = GB_is_packed (A) ;
bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
bool Ap_shallow = A->p_shallow ;
bool Ah_shallow = A->h_shallow ;
bool Ai_shallow = A->i_shallow ;
bool Ax_shallow = A->x_shallow ;
bool Ab_shallow = A->b_shallow ;
int64_t anz = GB_NNZ (A) ;
int64_t anvec = A->nvec ;
int64_t anvals = A->nvals ;
//--------------------------------------------------------------------------
// determine the max number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// determine the type of C and get the unary or binary operator
//--------------------------------------------------------------------------
// If a unary or binary operator is present, C is always returned as
// the ztype of the operator. The input ctype is ignored.
GrB_UnaryOp op1 = NULL ;
GrB_BinaryOp op2 = NULL ;
GB_Opcode opcode = GB_NOP_opcode ;
if (op1_in != NULL)
{
// get the unary operator
opcode = op1_in->opcode ;
if (atype == op1_in->xtype && opcode == GB_IDENTITY_opcode)
{
// op1 is a built-in identity operator, with the same type as A, so
// do not apply the operator and do not typecast. op1 is NULL.
ctype = atype ;
}
else
{
// apply the operator, z=op1(x)
op1 = op1_in ;
ctype = op1->ztype ;
}
}
else if (op2_in != NULL)
{
// get the binary operator
GrB_Type op2_intype = binop_bind1st ? op2_in->xtype : op2_in->ytype ;
opcode = op2_in->opcode ;
// only GB_apply calls GB_transpose with op2_in, and it ensures this
// condition holds: the first(A,y), second(x,A), and any(...) have
// been renamed to identity(A), so these cases do not occur here.
ASSERT (!
((opcode == GB_ANY_opcode) ||
(opcode == GB_FIRST_opcode && !binop_bind1st) ||
(opcode == GB_SECOND_opcode && binop_bind1st))) ;
// apply the operator, z=op2(A,y) or op2(x,A)
op2 = op2_in ;
ctype = op2->ztype ;
}
else
{
// no operator. both op1 and op2 are NULL
if (ctype == NULL)
{
// no typecasting if ctype is NULL
ctype = atype ;
}
}
GB_Type_code ccode = ctype->code ;
size_t csize = ctype->size ;
//--------------------------------------------------------------------------
// check for positional operators
//--------------------------------------------------------------------------
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
GrB_UnaryOp save_op1 = op1 ;
GrB_BinaryOp save_op2 = op2 ;
if (op_is_positional)
{
// do not apply the op until after the transpose
op1 = NULL ;
op2 = NULL ;
// replace op1 with the ONE operator, as a placeholder
ASSERT (ctype == GrB_INT64 || ctype == GrB_INT32) ;
op1 = (ctype == GrB_INT64) ? GxB_ONE_INT64 : GxB_ONE_INT32 ;
}
//--------------------------------------------------------------------------
// C = A'
//--------------------------------------------------------------------------
ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
bool allocate_new_Cx = (ctype != atype) || (op1 != NULL) || (op2 != NULL) ;
if (anz == 0)
{
//======================================================================
// quick return if A is empty
//======================================================================
// free prior space of A, if transpose is done in-place
GB_FREE_IN_PLACE_A ;
// A is empty; create a new empty matrix C, with the new type and
// dimensions. C is hypersparse for now but may convert when
// returned.
info = GB_new_bix (Chandle, C_static_header, // hyper, old or new header
ctype, avdim, avlen, GB_Ap_calloc, C_is_csc,
GxB_HYPERSPARSE, true, A_hyper_switch, 1, 1, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "C transpose empty", GB0) ;
ASSERT (!GB_JUMBLED (*Chandle)) ;
}
else if (A_is_packed)
{
//======================================================================
// transpose a packed or bitmap matrix or vector
//======================================================================
// A is packed if it is either: (a) bitmap, (b) full, or (c) sparse or
// hypersparse with all entries present, no zombies, no pending tuples,
// and not jumbled. For (c), the matrix A can be treated as if it was
// full, and the pattern (A->p, A->h, and A->i) can be ignored.
int sparsity = (A_is_bitmap) ? GxB_BITMAP : GxB_FULL ;
bool T_cheap = // T can be done quickly if:
(avlen == 1 || avdim == 1) // A is a row or column vector,
&& op1 == NULL && op2 == NULL // no operator to apply, and
&& atype == ctype ; // no typecasting
// allocate T
if (T_cheap)
{
// just initialize the static header of T, not T->b or T->x
info = GB_new (&T, true, // bitmap or full, static header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
sparsity, A_hyper_switch, 1, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
// allocate all of T, including T->b and T->x
info = GB_new_bix (&T, true, // bitmap or full, static header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
sparsity, true, A_hyper_switch, 1, anzmax, true, Context) ;
}
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
T->magic = GB_MAGIC ;
if (sparsity == GxB_BITMAP)
{
T->nvals = anvals ; // for bitmap case only
}
//----------------------------------------------------------------------
// T = A'
//----------------------------------------------------------------------
// Since A is full, # threads to use is nthreads, and the
// nworkspaces parameter is not used
int64_t anz_held = GB_NNZ_HELD (A) ;
ASSERT (anz > 0 && anz_held > 0) ;
int nthreads = GB_nthreads (anz_held + anvec, chunk, nthreads_max) ;
if (T_cheap)
{
// no work to do. Transposing does not change A->b or A->x
T->b = Ab ; T->b_size = Ab_size ;
T->x = Ax ; T->x_size = Ax_size ;
T->nzmax = A->nzmax ;
if (in_place)
{
// transplant A->b and A->x into T
T->b_shallow = Ab_shallow ;
T->x_shallow = Ax_shallow ;
Ab = NULL ; // do not free prior Ab
Ax = NULL ; // do not free prior Ax
A->b = NULL ;
A->x = NULL ;
}
else
{
// T is a purely shallow copy of A
T->b_shallow = (Ab != NULL) ;
T->x_shallow = true ;
}
}
else if (op1 == NULL && op2 == NULL)
{
// do not apply an operator; optional typecast to C->type
GB_transpose_ix (T, A, NULL, NULL, 0, nthreads) ;
}
else
{
// apply an operator, C has type op->ztype
GB_transpose_op (T, op1, op2, scalar, binop_bind1st, A,
NULL, NULL, 0, nthreads) ;
}
ASSERT_MATRIX_OK (T, "T dense/bitmap", GB0) ;
ASSERT (!GB_JUMBLED (T)) ;
// free prior space of A, if transpose is done in-place
GB_FREE_IN_PLACE_A ;
//----------------------------------------------------------------------
// transplant T into C
//----------------------------------------------------------------------
// allocate the output matrix C as a full or bitmap matrix
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, C_static_header, // bitmap/full, old/new header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
sparsity, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place,
ASSERT (!C_static_header) ; // or if C has a static header
GB_FREE_C ;
GB_phbix_free (T) ;
return (info) ;
}
// Transplant T into the result C, making a copy if T is shallow
info = GB_transplant (*Chandle, ctype, &T, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "Chandle, GB_transpose, bitmap/full", GB0) ;
}
else if (avdim == 1)
{
//======================================================================
// transpose a "column" vector into a "row"
//======================================================================
// transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
// A must be sorted first.
ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ;
GB_MATRIX_WAIT (A) ;
ASSERT (!GB_JUMBLED (A)) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, C->b, or C->x
// content, and initialize the type and dimension of C. The new matrix
// is hypersparse. This step does not allocate anything if in-place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, C_static_header, // hyper; old or new header
ctype, 1, avlen, GB_Ap_null, C_is_csc,
GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place,
ASSERT (!C_static_header) ; // or if C has a static header
GB_FREE_C ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ;
int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ;
GB_void *restrict Cx = NULL ; size_t Cx_size = 0 ;
bool ok = true ;
Cp = GB_MALLOC (anz+1, int64_t, &Cp_size) ;
Ci = GB_MALLOC (anz , int64_t, &Ci_size) ;
ok = (Cp != NULL && Ci != NULL) ;
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
ASSERT (anz > 0) ;
Cx = GB_MALLOC (anz * ctype->size, GB_void, &Cx_size) ;
ok = ok && (Cx != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE (&Cp, Cp_size) ;
GB_FREE (&Ci, Ci_size) ;
GB_FREE (&Cx, Cx_size) ;
GB_FREE_IN_PLACE_A ;
GB_FREE_C ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// fill the content of C
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Cx = op (A)
info = GB_apply_op ( // op1 != identity of same types
(GB_void *) Cx, op1, op2, scalar, binop_bind1st, A, Context) ;
// GB_apply_op can only fail if op1/op2 are positional
ASSERT (!GB_OP_IS_POSITIONAL (op1)) ;
ASSERT (!GB_OP_IS_POSITIONAL (op2)) ;
ASSERT (info == GrB_SUCCESS) ;
C->x = Cx ; C->x_size = Cx_size ;
C->x_shallow = false ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_size = Cx_size ;
C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, Ab, asize, anz, 1) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A.
C->x = Ax ; C->x_size = Ax_size ;
C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
// each entry in A becomes a non-empty vector in C
// C is a hypersparse 1-by-avlen matrix
C->h = Ai ; C->h_size = Ai_size ;
C->h_shallow = (in_place) ? Ai_shallow : true ;
Ai = NULL ; // do not free prior Ai
// C->p = 0:anz and C->i = zeros (1,anz), newly allocated
C->plen = anz ;
C->nvec = anz ;
C->nvec_nonempty = anz ;
C->i = Ci ; C->i_size = Ci_size ;
C->p = Cp ; C->p_size = Cp_size ;
// fill the vector pointers C->p
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Ci [k] = 0 ;
Cp [k] = k ;
}
Cp [anz] = anz ;
C->nzmax = anz ;
C->magic = GB_MAGIC ;
// free prior space of A, if transpose is done in-place
GB_FREE_IN_PLACE_A ;
}
else if (avlen == 1)
{
//======================================================================
// transpose a "row" into a "column" vector
//======================================================================
// transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
// if A->vlen is 1, all vectors of A are implicitly sorted
ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ;
//----------------------------------------------------------------------
// allocate workspace, if needed
//----------------------------------------------------------------------
int ntasks = 0 ;
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
GB_WERK_DECLARE (Count, int64_t) ;
if (nth > 1 && !A_is_hyper)
{
// ntasks and Count are not needed if nth == 1
ntasks = 8 * nth ;
ntasks = GB_IMIN (ntasks, avdim) ;
ntasks = GB_IMAX (ntasks, 1) ;
GB_WERK_PUSH (Count, ntasks+1, int64_t) ;
if (Count == NULL)
{
// out of memory
GB_FREE_C ;
return (GrB_OUT_OF_MEMORY) ;
}
}
// Allocate the header of C, with no C->p, C->h, C->i, or C->x content,
// and initialize the type and dimension of C. If in-place, A->p,
// A->h, A->i, and A->x are all NULL. The new matrix is sparse, but
// can be CSR or CSC. This step does not allocate anything if in
// place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, C_static_header, // sparse; old or new header
ctype, avdim, 1, GB_Ap_null, C_is_csc,
GxB_SPARSE, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place,
ASSERT (!C_static_header) ; // or if C has a static header
GB_FREE_C ;
GB_WERK_POP (Count, int64_t) ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ;
int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ;
GB_void *restrict Cx = NULL ; size_t Cx_size = 0 ;
bool ok = true ;
Cp = GB_MALLOC (2, int64_t, &Cp_size) ;
ok = ok && (Cp != NULL) ;
if (!A_is_hyper)
{
// A is sparse, so new space is needed for Ci
Ci = GB_MALLOC (anz, int64_t, &Ci_size) ;
ok = ok && (Ci != NULL) ;
}
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
Cx = GB_MALLOC (anz * ctype->size, GB_void, &Cx_size) ;
ok = ok && (Cx != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE (&Cp, Cp_size) ;
GB_FREE (&Ci, Ci_size) ;
GB_FREE (&Cx, Cx_size) ;
GB_WERK_POP (Count, int64_t) ;
GB_FREE_IN_PLACE_A ;
GB_FREE_C ;
return (GrB_OUT_OF_MEMORY) ;
}
Cp [0] = 0 ;
Cp [1] = 0 ;
//----------------------------------------------------------------------
// numerical values of C: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Cx = op (A)
info = GB_apply_op ( // op1 != identity of same types
(GB_void *) Cx, op1, op2, scalar, binop_bind1st, A, Context) ;
// GB_apply_op can only fail if op1/op2 are positional
ASSERT (!GB_OP_IS_POSITIONAL (op1)) ;
ASSERT (!GB_OP_IS_POSITIONAL (op2)) ;
ASSERT (info == GrB_SUCCESS) ;
C->x = Cx ; C->x_size = Cx_size ;
C->x_shallow = false ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_size = Cx_size ;
C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, Ab, asize, anz, 1) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A
C->x = Ax ; C->x_size = Ax_size ;
C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
//----------------------------------------------------------------------
// pattern of C
//----------------------------------------------------------------------
if (A_is_hyper)
{
//------------------------------------------------------------------
// each non-empty vector in A becomes an entry in C
//------------------------------------------------------------------
C->i = Ah ; C->i_size = Ah_size ;
C->i_shallow = (in_place) ? Ah_shallow : true ;
ASSERT (anvec == anz) ;
Ah = NULL ; // do not free prior Ah
}
else
{
//------------------------------------------------------------------
// find the non-empty vectors of A, which become entries in C
//------------------------------------------------------------------
ASSERT (Ah == NULL) ;
if (nth == 1)
{
//--------------------------------------------------------------
// construct Ci with a single thread
//--------------------------------------------------------------
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
ASSERT (k == anz) ;
}
else
{
//--------------------------------------------------------------
// construct Ci in parallel
//--------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = 0 ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
k++ ;
}
}
Count [tid] = k ;
}
GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
ASSERT (Count [ntasks] == anz) ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
}
}
#ifdef GB_DEBUG
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
ASSERT (Ci [k] == j) ;
k++ ;
}
}
ASSERT (k == anz) ;
#endif
C->i = Ci ; C->i_size = Ci_size ;
C->i_shallow = false ;
}
//---------------------------------------------------------------------
// vector pointers of C
//---------------------------------------------------------------------
// C->p = [0 anz] and C->h = NULL
ASSERT (C->plen == 1) ;
ASSERT (C->nvec == 1) ;
ASSERT (C->h == NULL) ;
C->p = Cp ; C->p_size = Cp_size ;
C->p_shallow = false ;
C->nvec_nonempty = (anz == 0) ? 0 : 1 ;
// fill the vector pointers C->p
Cp [0] = 0 ;
Cp [1] = anz ;
C->nzmax = anz ;
C->magic = GB_MAGIC ;
ASSERT (!GB_JUMBLED (C)) ;
// free prior space of A, if transpose done in-place, and free workspace
GB_FREE_IN_PLACE_A ;
GB_WERK_POP (Count, int64_t) ;
}
else
{
//======================================================================
// transpose a general sparse or hypersparse matrix
//======================================================================
ASSERT_MATRIX_OK (A, "A for GB_transpose", GB0) ;
// T=A' with optional typecasting, or T=op(A')
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
int nworkspaces_bucket, nthreads_bucket ;
bool use_builder = GB_transpose_method (A,
&nworkspaces_bucket, &nthreads_bucket, Context) ;
//----------------------------------------------------------------------
// transpose the matrix with the selected method
//----------------------------------------------------------------------
if (use_builder)
{
//==================================================================
// transpose via GB_builder
//==================================================================
//------------------------------------------------------------------
// allocate and create iwork
//------------------------------------------------------------------
// allocate iwork of size anz
int64_t *iwork = NULL ; size_t iwork_size = 0 ;
iwork = GB_MALLOC (anz, int64_t, &iwork_size) ;
if (iwork == NULL)
{
// out of memory
GB_FREE_C ;
return (GrB_OUT_OF_MEMORY) ;
}
// Construct the "row" indices of C, which are "column" indices of
// A. This array becomes the permanent T->i on output. This phase
// must be done before Chandle is created below, since that step
// destroys A.
info = GB_extract_vector_list (iwork, A, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE (&iwork, iwork_size) ;
GB_FREE_C ;
return (info) ;
}
//------------------------------------------------------------------
// allocate the output matrix and additional space (jwork and S)
//------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in-place.
// if *Chandle == NULL, allocate a new header; otherwise reuse
info = GB_new (Chandle, C_static_header, // hyper, old or new header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place,
ASSERT (!C_static_header) ; // or if C has a static header
GB_FREE (&iwork, iwork_size) ;
GB_FREE_C ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// if in_place, the prior Ap and Ah can now be freed
if (in_place)
{
if (!Ap_shallow) GB_FREE (&Ap, Ap_size) ;
if (!Ah_shallow) GB_FREE (&Ah, Ah_size) ;
}
int64_t *jwork = NULL ; size_t jwork_size = 0 ;
GB_Type_code scode ;
GB_void *S = NULL ;
GB_void *Swork = NULL ; size_t Swork_size = 0 ;
// for the GB_builder method, if the transpose is done in-place and
// A->i is not shallow, A->i can be used and then freed.
// Otherwise, A->i is not modified at all.
bool ok = true ;
bool recycle_Ai = (in_place && !Ai_shallow) ;
if (!recycle_Ai)
{
// allocate jwork of size anz
jwork = GB_MALLOC (anz, int64_t, &jwork_size) ;
ok = ok && (jwork != NULL) ;
}
if (op1 != NULL || op2 != NULL)
{
// allocate Swork of size anz * csize
Swork = GB_MALLOC (anz * csize, GB_void, &Swork_size) ;
ok = ok && (Swork != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE (&iwork, iwork_size) ;
GB_FREE (&jwork, jwork_size) ;
GB_FREE (&Swork, Swork_size) ;
GB_FREE_IN_PLACE_A ;
GB_FREE_C ;
return (GrB_OUT_OF_MEMORY) ;
}
//------------------------------------------------------------------
// construct jwork and Swork
//------------------------------------------------------------------
// "row" indices of A become "column" indices of C
if (recycle_Ai)
{
// Ai is used as workspace for the "column" indices of C.
// jwork is a shallow copy of Ai, and is freed by GB_builder.
jwork = Ai ;
jwork_size = Ai_size ;
ASSERT (in_place) ;
// set Ai to NULL so it is not freed by GB_FREE_IN_PLACE_A
Ai = NULL ;
}
else
{
// jwork = Ai, making a deep copy. jwork is freed by
// GB_builder. A->i is not modified, even if out of memory.
GB_memcpy (jwork, Ai, anz * sizeof (int64_t), nthreads_max) ;
}
// numerical values: apply the op, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Swork = op (A)
info = GB_apply_op ( // op1 != identity of same types
(GB_void *) Swork, op1, op2, scalar, binop_bind1st,
A, Context) ;
// GB_apply_op can only fail if op1/op2 are positional
ASSERT (!GB_OP_IS_POSITIONAL (op1)) ;
ASSERT (!GB_OP_IS_POSITIONAL (op2)) ;
ASSERT (info == GrB_SUCCESS) ;
// GB_builder will not need to typecast Swork to T->x, and it
// may choose to transplant it into T->x
scode = ccode ;
#if 0
if (in_place && !Ax_shallow)
{
// A is being transposed in-place so A->x is no longer
// needed. If A->x is shallow this can be skipped. T->x
// will not be shallow if the op is present. A->x should
// be freed early to free up space for GB_builder.
// However, in the current usage, when op is used, A is not
// transposed in-place, so this step is not needed.
ASSERT (GB_DEAD_CODE) ;
GB_FREE (&Ax, A->x_size) ;
}
#endif
}
else
{
// GB_builder will typecast S from atype to ctype if needed.
// S is a shallow copy of Ax, and must not be modified.
S = Ax ;
scode = acode ;
}
//------------------------------------------------------------------
// build the matrix: T = (ctype) A' or op ((xtype) A')
//------------------------------------------------------------------
// internally, jwork is freed and then T->x is allocated, so the
// total high-water memory usage is anz * max (csize,
// sizeof(int64_t)). T is always hypersparse.
// If op is not NULL, then Swork can be transplanted into T in
// GB_builder, instead. However, this requires the tuples to be
// sorted on input, which is possible but rare for GB_transpose.
info = GB_builder
(
T, // create T using a static header
ctype, // T is of type ctype
avdim, // T->vlen = A->vdim, always > 1
avlen, // T->vdim = A->vlen, always > 1
C_is_csc, // T has the same CSR/CSC format as C
&iwork, // iwork_handle, becomes T->i on output
&iwork_size,
&jwork, // jwork_handle, freed on output
&jwork_size,
&Swork, // Swork_handle, freed on output
&Swork_size,
false, // tuples are not sorted on input
true, // tuples have no duplicates
anz, // size of iwork, jwork, and Swork
true, // is_matrix: unused
NULL, NULL, // original I,J indices: not used here
S, // array of values of type scode, not modified
anz, // number of tuples
NULL, // no dup operator needed (input has no duplicates)
scode, // type of S or Swork
Context
) ;
// GB_builder always frees jwork, and either frees iwork or
// transplants it in to T->i and sets iwork to NULL. So iwork and
// jwork are always NULL on output. GB_builder does not modify S.
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
//------------------------------------------------------------------
// free prior space and transplant T into C
//------------------------------------------------------------------
// Free the prior content of the input matrix, if done in-place.
// Ap, Ah, and Ai have already been freed, but Ax has not.
GB_FREE_IN_PLACE_A ;
if (info != GrB_SUCCESS)
{
// out of memory in GB_builder
GB_FREE_C ;
return (info) ;
}
// Transplant T in to the result C. The matrix T is not shallow
// and no typecasting is done, so this will always succeed.
ASSERT (!GB_JUMBLED (T)) ;
info = GB_transplant (*Chandle, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
//==================================================================
// transpose via bucket sort
//==================================================================
// This method does not operate on the matrix in-place, so it must
// create a temporary matrix T. Then the input matrix is freed and
// replaced with the new matrix T.
// T = A' and typecast to ctype
info = GB_transpose_bucket (T, ctype, C_is_csc, A,
op1, op2, scalar, binop_bind1st,
nworkspaces_bucket, nthreads_bucket, Context) ;
// free prior content, if C=A' is being done in-place
if (in_place_A)
{
ASSERT (A == (*Chandle)) ;
GB_phbix_free (A) ;
}
else if (in_place_C)
{
GB_phbix_free (C) ;
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_transpose_bucket
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (T, "T from bucket", GB0) ;
ASSERT (GB_JUMBLED_OK (T)) ;
// allocate the output matrix C (just the header)
// if *Chandle == NULL, allocate a new header; otherwise reuse
info = GB_new (Chandle, C_static_header, // old/new header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GxB_SPARSE, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place,
ASSERT (!C_static_header) ; // or if C has a static header
GB_FREE_C ;
GB_phbix_free (T) ;
return (info) ;
}
// Transplant T back into C and free T. T is not shallow and
// no typecast is done so this will always succeed.
info = GB_transplant (*Chandle, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
}
//--------------------------------------------------------------------------
// get the output matrix
//--------------------------------------------------------------------------
C = (*Chandle) ;
ASSERT (GB_JUMBLED_OK (C)) ;
//--------------------------------------------------------------------------
// apply a positional operator, after transposing the matrix
//--------------------------------------------------------------------------
if (op_is_positional)
{
// the positional operator is applied in-place to the values of C
op1 = save_op1 ;
op2 = save_op2 ;
// Cx = op (C)
info = GB_apply_op ((GB_void *) C->x, op1, // positional op only
op2, scalar, binop_bind1st, C, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
}
//--------------------------------------------------------------------------
// conform the result to the desired sparisty structure of A
//--------------------------------------------------------------------------
// transplant the control settings from A to C
C->hyper_switch = A_hyper_switch ;
C->bitmap_switch = A_bitmap_switch ;
C->sparsity = A_sparsity ;
ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ;
info = GB_conform (C, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "Chandle conformed in GB_transpose", GB0) ;
return (GrB_SUCCESS) ;
}
|
ignored.c | // RUN: %compile-run-and-check
#include <omp.h>
#include <stdio.h>
const int MaxThreads = 1024;
// Runs inside an OpenMP target region and reads back runtime settings that
// the libomptarget-nvptx device runtime does not support changing; FileCheck
// verifies the fixed values the device runtime is expected to report.
int main(int argc, char *argv[]) {
// -1 sentinels: prove the target region actually wrote each value back.
int cancellation = -1, dynamic = -1, nested = -1, maxActiveLevels = -1;
#pragma omp target map(cancellation, dynamic, nested, maxActiveLevels)
{
// libomptarget-nvptx doesn't support cancellation.
cancellation = omp_get_cancellation();
// No support for dynamic adjustment of the number of threads.
omp_set_dynamic(1);
dynamic = omp_get_dynamic();
// libomptarget-nvptx doesn't support nested parallelism.
omp_set_nested(1);
nested = omp_get_nested();
// Request more active levels than supported; the CHECK below expects the
// device runtime to keep reporting 1.
omp_set_max_active_levels(42);
maxActiveLevels = omp_get_max_active_levels();
}
// CHECK: cancellation = 0
printf("cancellation = %d\n", cancellation);
// CHECK: dynamic = 0
printf("dynamic = %d\n", dynamic);
// CHECK: nested = 0
printf("nested = %d\n", nested);
// CHECK: maxActiveLevels = 1
printf("maxActiveLevels = %d\n", maxActiveLevels);
return 0;
}
|
owl_ndarray_pool_impl.h | /*
* OWL - OCaml Scientific Computing
* Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
*/
#ifdef OWL_ENABLE_TEMPLATE
/* 2-D pooling, forward pass, over a batched tensor laid out as
 * [batch][col][row][channel] (flattened strides computed below).
 * Template macros supplied by the including file:
 *   TYPE            element type
 *   INITACC         initial accumulator value
 *   ACCFN(acc, t)   fold element t into acc (e.g. max or sum)
 *   UPDATEFN(a, c)  finalize accumulator a given the in-bounds count c
 * vPadding == 1 skips implicit padding; otherwise pad sizes are derived
 * from the output geometry (presumably SAME padding -- confirm against the
 * OCaml caller). vRow_in_stride / vCol_in_stride are read but unused here.
 */
CAMLprim value FUN_NATIVE (spatial) (
value vInput_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
/* Unbox the OCaml integer arguments. */
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
/* Flattened strides: _cri = per batch, _ri = per column. */
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
/* pr/pc: top/left implicit padding, clamped to >= 0. */
int pr = 0, pc = 0;
if (padding != 1){
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
/* Batches are independent, so parallelize over the outermost loop. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
/* Window bounds in input coordinates (may extend out of range). */
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
/* c counts the in-bounds elements actually folded into acc. */
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
TYPE t = *(input_ptr + input_idx);
ACCFN (acc, t);
c++;
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = UPDATEFN (acc, c);
}
}
}
}
return Val_unit;
}
/* Bytecode stub: functions with more than 5 arguments receive them boxed
 * in argv[]; unpack and forward to the native-code stub. argn is unused. */
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn)
{
  return FUN_NATIVE (spatial) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],
    argv[5],  argv[6],  argv[7],  argv[8],  argv[9],
    argv[10], argv[11], argv[12], argv[13], argv[14]);
}
CAMLprim value FUN_NATIVE (spatial_backward) (
value vInput, value vOutput_back, value vInput_back,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPad_rows, value vPad_cols
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_backward_ptr = (TYPE *) OUB->data;
TYPE *input_backward_ptr = (TYPE *) INB->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int pad_rows = Long_val(vPad_rows);
int pad_cols = Long_val(vPad_cols);
const int ksize = kernel_cols * kernel_rows;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
if (pad_cols < 0) pad_cols = 0;
if (pad_rows < 0) pad_rows = 0;
memset(input_backward_ptr, 0,
batches * input_cols * input_rows * in_channel * sizeof(TYPE));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pad_cols;
const int rstart = k * row_stride - pad_rows;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE m;
int output_idx = output_idx_base + l;
m = *(output_backward_ptr + output_idx);
int idx[ksize];
memset(idx, 0, ksize * sizeof(int));
TYPE acc = INITACC;
int max_idx = 0;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
idx[c++] = input_idx;
#ifdef OWL_NDARRAY_MAX
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
#endif
}
}
}
#ifdef OWL_NDARRAY_AVG
for (int i = 0; i < c; i++) {
*(input_backward_ptr + idx[i]) += UPDATEFN (m, c);
}
#else
*(input_backward_ptr + max_idx) += UPDATEFN (m, c);
#endif
}
}
}
}
return Val_unit;
}
/* Bytecode stub: unpack the boxed argv[] arguments and forward to the
 * native-code stub. argn is unused. */
CAMLprim value FUN_BYTE (spatial_backward) (value * argv, int argn)
{
  return FUN_NATIVE (spatial_backward) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],
    argv[5],  argv[6],  argv[7],  argv[8],  argv[9],
    argv[10], argv[11], argv[12], argv[13], argv[14]);
}
/* 3-D (cuboid) pooling, forward pass, over a batched tensor laid out as
 * [batch][col][row][depth][channel] (flattened strides computed below).
 * Template macros supplied by the including file:
 *   TYPE, INITACC, ACCFN(acc, t), UPDATEFN(a, c) -- same contract as the
 *   2-D `spatial` kernel above.
 * vPadding == 1 skips implicit padding; otherwise pad sizes are derived
 * from the output geometry (presumably SAME padding -- confirm against the
 * OCaml caller).
 */
CAMLprim value FUN_NATIVE (cuboid) (
value vInput, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows, value vOutput_dpts,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
/* Unbox the OCaml integer arguments. */
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
/* Flattened strides: _crdi = per batch, _rdi = per column, _di = per row. */
const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
const int output_rdi = output_rows * output_dpts * in_channel;
const int output_di = output_dpts * in_channel;
const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
const int input_rdi = input_rows * input_dpts * in_channel;
const int input_di = input_dpts * in_channel;
memset(output_ptr, 0, batches * output_crdi * sizeof(TYPE));
/* pd/pr/pc: implicit padding per axis, clamped to >= 0. */
int pd, pr, pc;
if (padding == 1) {
pc = 0; pr = 0; pd = 0;
} else {
int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
pc = pad_cols / 2; if (pc < 0) pc = 0;
pr = pad_rows / 2; if (pr < 0) pr = 0;
pd = pad_dpts / 2; if (pd < 0) pd = 0;
}
/* Batches are independent, so parallelize over the outermost loop. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
const int output_idx_base_i = i * output_crdi;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_rdi;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base_k = output_idx_base_j + k * output_di;
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base = output_idx_base_k + d * in_channel;
/* Window bounds in input coordinates (may extend out of range). */
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
/* counter counts the in-bounds elements folded into acc. */
int counter = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c){
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + l;
TYPE t = *(input_ptr + input_idx);
ACCFN (acc, t);
counter++;
}
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = UPDATEFN (acc, counter);
}
}
}
}
}
return Val_unit;
}
/* Bytecode stub: unpack the boxed argv[] arguments and forward to the
 * native-code stub. argn is unused. */
CAMLprim value FUN_BYTE (cuboid) (value * argv, int argn)
{
  return FUN_NATIVE (cuboid) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]);
}
/* 3-D (cuboid) pooling, backward pass: scatter the output gradient back
 * onto the input elements that produced each pooling window.
 * Template macros supplied by the including file:
 *   TYPE, INITACC, PLT(a, b) (comparison used for max pooling),
 *   UPDATEFN(m, c) (per-element gradient given window count c).
 * With OWL_NDARRAY_AVG the gradient m is spread over all counter in-bounds
 * window elements; otherwise the whole gradient goes to the arg-max element.
 * Fix: the gradient-scatter loop index was named `i`, shadowing the OpenMP
 * batch index `i` of the enclosing parallel loop (-Wshadow hazard); it is
 * renamed to `q`. Behavior is unchanged.
 */
CAMLprim value FUN_NATIVE (cuboid_backward) (
  value vInput, value vOutput_back, value vInput_back,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows, value vOutput_dpts,
  value vCol_stride, value vRow_stride, value vDpt_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
  struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *output_backward_ptr = (TYPE *) OUB->data;
  TYPE *input_backward_ptr = (TYPE *) INB->data;
  /* Unbox the OCaml integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int col_stride = Long_val(vCol_stride);
  int row_stride = Long_val(vRow_stride);
  int dpt_stride = Long_val(vDpt_stride);
  int padding = Long_val(vPadding);
  const int ksize = kernel_cols * kernel_rows * kernel_dpts;
  /* Flattened strides: _crdi = per batch, _rdi = per column, _di = per row. */
  const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
  const int output_rdi = output_rows * output_dpts * in_channel;
  const int output_di = output_dpts * in_channel;
  const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
  const int input_rdi = input_rows * input_dpts * in_channel;
  const int input_di = input_dpts * in_channel;
  /* pd/pr/pc: implicit padding per axis, clamped to >= 0. */
  int pd, pr, pc;
  if (padding == 1) {
    pc = 0; pr = 0; pd = 0;
  } else {
    int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
    int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
    int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
    pc = pad_cols / 2; if (pc < 0) pc = 0;
    pr = pad_rows / 2; if (pr < 0) pr = 0;
    pd = pad_dpts / 2; if (pd < 0) pd = 0;
  }
  /* Gradients are accumulated with +=, so the buffer must start at zero. */
  memset(input_backward_ptr, 0, batches * input_crdi * sizeof(TYPE));
  /* Batches are independent, so parallelize over the outermost loop. */
#ifdef _OPENMP
  #pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    const int output_idx_base_i = i * output_crdi;
    for (int j = 0; j < output_cols; ++j) {
      const int output_idx_base_j = output_idx_base_i + j * output_rdi;
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base_k = output_idx_base_j + k * output_di;
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base = output_idx_base_k + d * in_channel;
          /* Window bounds in input coordinates (may extend out of range). */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < in_channel; ++l) {
            TYPE m;
            int output_idx = output_idx_base + l;
            /* m = incoming gradient for this output element. */
            m = *(output_backward_ptr + output_idx);
            /* Indices of the in-bounds input elements of this window. */
            int idx[ksize];
            memset(idx, 0, ksize * sizeof(int));
            TYPE acc = INITACC;
            int max_idx = 0;
            int counter = 0;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                for (int c = dstart; c < dend; ++c) {
                  if (a >= 0 && a < input_cols &&
                      b >= 0 && b < input_rows &&
                      c >= 0 && c < input_dpts) {
                    int input_idx =
                      input_idx_base + a * input_rdi + b * input_di +
                      c * in_channel + l;
                    idx[counter++] = input_idx;
#ifdef OWL_NDARRAY_MAX
                    /* Track the arg-max within the window. */
                    TYPE t = *(input_ptr + input_idx);
                    if (PLT(acc,t)){
                      acc = t;
                      max_idx = input_idx;
                    }
#endif
                  }
                }
              }
            }
#ifdef OWL_NDARRAY_AVG
            /* Average pooling: spread the gradient over every in-bounds
               element. Loop index renamed from `i` (shadowed the OpenMP
               batch index above). */
            for (int q = 0; q < counter; q++) {
              *(input_backward_ptr + idx[q]) += UPDATEFN (m, counter);
            }
#else
            /* Max pooling: the whole gradient goes to the arg-max element. */
            *(input_backward_ptr + max_idx) += UPDATEFN (m, counter);
#endif
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpack the boxed argv[] arguments and forward to the
 * native-code stub. argn is unused. */
CAMLprim value FUN_BYTE (cuboid_backward) (value * argv, int argn)
{
  return FUN_NATIVE (cuboid_backward) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]);
}
#ifdef OWL_NDARRAY_MAX
/* 2-D max pooling forward pass that additionally records, for every output
 * element, the flat input index of the maximum (arg-max) in its window;
 * the argmax array feeds the backward pass.
 * Compiled only under OWL_NDARRAY_MAX. Template macros supplied by the
 * including file: TYPE, INITACC (presumably the identity for max -- confirm
 * in the including file) and PLT(a, b), the "a less than b" comparison.
 * Fix: the pad clamps assigned the floating literal `0.` to the int pad
 * variables (`pad_rows = 0.;`) -- a harmless but warning-generating
 * copy/paste from a float variant; use the integer literal 0.
 */
CAMLprim value FUN_NATIVE (spatial_arg) (
  value vInput_ptr, value vOutput_ptr, value vArgmax_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows,
  value vRow_stride, value vCol_stride,
  value vPad_rows, value vPad_cols
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  struct caml_ba_array *AG = Caml_ba_array_val(vArgmax_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int64_t *argmax_ptr = (int64_t *) AG->data;
  /* Unbox the OCaml integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int pad_rows = Long_val(vPad_rows);
  int pad_cols = Long_val(vPad_cols);
  /* Clamp pads to >= 0 (integer literals; was `0.`, a double). */
  if (pad_rows < 0) pad_rows = 0;
  if (pad_cols < 0) pad_cols = 0;
  /* Flattened strides: _cri = per batch, _ri = per column. */
  const int input_cri = input_cols * input_rows * in_channel;
  const int input_ri = input_rows * in_channel;
  const int output_cri = output_cols * output_rows * in_channel;
  const int output_ri = output_rows * in_channel;
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  memset(argmax_ptr, 0, batches * output_cri * sizeof(int64_t));
  /* Batches are independent, so parallelize over the outermost loop. */
#ifdef _OPENMP
  #pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    const int output_idx_base_i = i * output_cri;
    for (int j = 0; j < output_cols; ++j) {
      const int output_idx_base_j = output_idx_base_i + j * output_ri;
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = output_idx_base_j + k * in_channel;
        /* Window bounds in input coordinates (may extend out of range). */
        const int cstart = j * col_stride - pad_cols;
        const int rstart = k * row_stride - pad_rows;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < in_channel; ++l) {
          TYPE acc = INITACC;
          /* max_idx stays -1 when no in-bounds element beats INITACC. */
          int max_idx = -1;
          int c = 0;
          for (int a = cstart; a < cend; ++a) {
            for (int b = rstart; b < rend; ++b) {
              if (a >= 0 && a < input_cols &&
                  b >= 0 && b < input_rows) {
                int input_idx =
                  input_idx_base + a * input_ri + b * in_channel + l;
                TYPE t = *(input_ptr + input_idx);
                if (PLT(acc,t)){
                  acc = t;
                  max_idx = input_idx;
                }
                c++;
              }
            }
          }
          int output_idx = output_idx_base + l;
          *(output_ptr + output_idx) = acc;
          *(argmax_ptr + output_idx) = (int64_t) max_idx;
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpack the boxed argv[] arguments and forward to the
 * native-code stub. argn is unused. */
CAMLprim value FUN_BYTE (spatial_arg) (value * argv, int argn)
{
  return FUN_NATIVE (spatial_arg) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],
    argv[5],  argv[6],  argv[7],  argv[8],  argv[9],
    argv[10], argv[11], argv[12], argv[13], argv[14]);
}
#endif /* OWL_NDARRAY_MAX */
#endif /* OWL_ENABLE_TEMPLATE */
|
GB_unop__sin_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sin_fp32_fp32)
// op(A') function: GB (_unop_tran__sin_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = sinf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sinf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = sinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIN || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = sinf (Ax [p]) for anz entries.
// NOTE: this file is auto-generated (see header); only comments added here.
GrB_Info GB (_unop_apply__sin_fp32_fp32)
(
    float *Cx,                  // output array; Cx and Ax may be aliased
    const float *Ax,            // input array
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic case
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply sinf.  The shared
// transpose kernel is textually #include'd below; it uses the GB_CAST_OP /
// GB_GETA macros defined above for this fp32->fp32 type pair.
GrB_Info GB (_unop_tran__sin_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // workspace array used by the kernel
    const int64_t *restrict A_slice, // partition of A across the threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickCore Deprecated Methods %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/geometry.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/semaphore-private.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
Global declarations.
*/
/* File-scope progress-monitor callback retained for the deprecated monitor
   API; NULL until a handler is installed. */
static MonitorHandler
  monitor_handler = (MonitorHandler) NULL;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e C a c h e V i e w I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireCacheViewIndexes() returns the indexes associated with the specified
% view.
%
% Deprecated, replace with:
%
% GetCacheViewVirtualIndexQueue(cache_view);
%
% The format of the AcquireCacheViewIndexes method is:
%
% const IndexPacket *AcquireCacheViewIndexes(const CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport const IndexPacket *AcquireCacheViewIndexes(
  const CacheView *cache_view)
{
  const IndexPacket
    *indexes;

  /* Deprecated alias: forward to GetCacheViewVirtualIndexQueue(). */
  indexes=GetCacheViewVirtualIndexQueue(cache_view);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireCacheViewPixels() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
%
% The format of the AcquireCacheViewPixels method is:
%
% const PixelPacket *AcquireCacheViewPixels(const CacheView *cache_view,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireCacheViewPixels(
  const CacheView *cache_view,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  const PixelPacket
    *pixels;

  /* Deprecated alias: forward to GetCacheViewVirtualPixels(). */
  pixels=GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImagePixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in RAM, or in a memory-mapped file. The
% returned pointer should *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the AcquireImagePixels() and GetAuthenticPixels() methods are not
% thread-safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% Deprecated, replace with:
%
% GetVirtualPixels(image,x,y,columns,rows,exception);
%
% The format of the AcquireImagePixels() method is:
%
% const PixelPacket *AcquireImagePixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireImagePixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  const PixelPacket
    *pixels;

  /* Deprecated alias: forward to GetVirtualPixels(). */
  pixels=GetVirtualPixels(image,x,y,columns,rows,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireIndexes() returns the black channel or the colormap indexes
% associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% Deprecated, replace with:
%
% GetVirtualIndexQueue(image);
%
% The format of the AcquireIndexes() method is:
%
% const IndexPacket *AcquireIndexes(const Image *image)
%
% A description of each parameter follows:
%
% o indexes: AcquireIndexes() returns the indexes associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% o image: the image.
%
*/
MagickExport const IndexPacket *AcquireIndexes(const Image *image)
{
  const IndexPacket
    *indexes;

  /* Deprecated alias: forward to GetVirtualIndexQueue(). */
  indexes=GetVirtualIndexQueue(image);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMemory() returns a pointer to a block of memory at least size bytes
% suitably aligned for any use.
%
% The format of the AcquireMemory method is:
%
% void *AcquireMemory(const size_t size)
%
% A description of each parameter follows:
%
% o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *AcquireMemory(const size_t size)
{
  /*
    Deprecated (last use: v5.5.7): thin wrapper over malloc().  The caller
    must check the result for NULL and is responsible for freeing it.
  */
  assert(size != 0);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  return(malloc(size));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e C a c h e V i e w P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneCacheViewPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead.
%
% Deprecated, replace with:
%
% GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception);
%
% The format of the AcquireOneCacheViewPixel method is:
%
% MagickBooleanType AcquireOneCacheViewPixel(const CacheView *cache_view,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y: These values define the offset of the pixel.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewPixel(
  const CacheView *cache_view,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forward to GetOneCacheViewVirtualPixel(). */
  status=GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e C a c h e V i e w V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneCacheViewVirtualPixel() returns a single pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead.
%
% Deprecated, replace with:
%
% GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,
% x,y,pixel,exception);
%
% The format of the AcquireOneCacheViewVirtualPixel method is:
%
% MagickBooleanType AcquireOneCacheViewVirtualPixel(
% const CacheView *cache_view,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the offset of the pixel.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewVirtualPixel(
  const CacheView *cache_view,const VirtualPixelMethod virtual_pixel_method,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /* Deprecated alias: forward to GetOneCacheViewVirtualMethodPixel(). */
  return(GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,x,y,
    pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOnePixel() instead.
%
% Deprecated, replace with:
%
% MagickPixelPacket pixel;
% GetOneVirtualMagickPixel(image,x,y,&pixel,exception);
%
% The format of the AcquireOneMagickPixel() method is:
%
% MagickPixelPacket AcquireOneMagickPixel(const Image *image,const ssize_t x,
% const ssize_t y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickPixelPacket AcquireOneMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  MagickPixelPacket
    result;

  /*
    Deprecated alias for GetOneVirtualMagickPixel().  Status is intentionally
    ignored: on failure the pixel holds the image background color (per the
    documented contract above) and is returned by value regardless.
  */
  (void) GetOneVirtualMagickPixel(image,x,y,&result,exception);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOnePixel() returns a single pixel at the specified (x,y) location.
% The image background color is returned if an error occurs. If you plan to
% modify the pixel, use GetOnePixel() instead.
%
% Deprecated, replace with:
%
% PixelPacket pixel;
% GetOneVirtualPixel(image,x,y,&pixel,exception);
%
% The format of the AcquireOnePixel() method is:
%
% PixelPacket AcquireOnePixel(const Image *image,const ssize_t x,
% const ssize_t y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOnePixel(const Image *image,const ssize_t x,
  const ssize_t y,ExceptionInfo *exception)
{
  PixelPacket
    result;

  /*
    Deprecated alias for GetOneVirtualPixel().  Status is intentionally
    ignored; the pixel is returned by value regardless.
  */
  (void) GetOneVirtualPixel(image,x,y,&result,exception);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneVirtualPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOnePixel() instead.
%
% Deprecated, replace with:
%
% PixelPacket pixel;
% GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel,exception);
%
% The format of the AcquireOneVirtualPixel() method is:
%
% PixelPacket AcquireOneVirtualPixel(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o virtual_pixel_method: the virtual pixel method.
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOneVirtualPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  ExceptionInfo *exception)
{
  PixelPacket
    result;

  /*
    Deprecated alias for GetOneVirtualMethodPixel().  Status is intentionally
    ignored; the pixel is returned by value regardless.
  */
  (void) GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&result,
    exception);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixels() returns the pixels associated with the last call to
% QueueAuthenticPixels() or GetVirtualPixels().
%
% Deprecated, replace with:
%
% GetVirtualPixelQueue(image);
%
% The format of the AcquirePixels() method is:
%
% const PixelPacket *AcquirePixels(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *AcquirePixels(const Image *image)
{
  const PixelPacket
    *pixels;

  /* Deprecated alias: forward to GetVirtualPixelQueue(). */
  pixels=GetVirtualPixelQueue(image);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireSemaphoreInfo() acquires a semaphore.
%
% The format of the AcquireSemaphoreInfo method is:
%
% void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
  assert(semaphore_info != (SemaphoreInfo **) NULL);
  /*
    Lazily allocate the semaphore using double-checked locking: the first,
    unsynchronized test avoids taking the global mutex once the semaphore
    exists; the test is repeated under the lock in case another thread
    allocated it between the check and LockMagickMutex().
  */
  if (*semaphore_info == (SemaphoreInfo *) NULL)
    {
      InitializeMagickMutex();
      LockMagickMutex();
      if (*semaphore_info == (SemaphoreInfo *) NULL)
        *semaphore_info=AllocateSemaphoreInfo();
      UnlockMagickMutex();
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffinityImage() replaces the colors of an image with the closest color from
% a reference image.
%
% Deprecated, replace with:
%
% RemapImage(quantize_info,image,affinity_image);
%
% The format of the AffinityImage method is:
%
% MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *affinity_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *affinity_image)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forward to RemapImage(). */
  status=RemapImage(quantize_info,image,affinity_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n i t y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffinityImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% Deprecated, replace with:
%
% RemapImages(quantize_info,images,affinity_image);
%
% The format of the AffinityImages method is:
%
% MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
% Image *images,Image *affinity_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *affinity_image)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forward to RemapImages(). */
  status=RemapImages(quantize_info,images,affinity_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateImage() returns a pointer to an image structure initialized to
% default values.
%
% Deprecated, replace with:
%
% AcquireImage(image_info);
%
% The format of the AllocateImage method is:
%
% Image *AllocateImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AllocateImage(const ImageInfo *image_info)
{
  Image
    *image;

  /* Deprecated alias: forward to AcquireImage(). */
  image=AcquireImage(image_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AllocateImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% Deprecated, replace with:
%
% AcquireImageColormap(image,colors);
%
% The format of the AllocateImageColormap method is:
%
% MagickBooleanType AllocateImageColormap(Image *image,
% const size_t colors)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
*/
MagickExport MagickBooleanType AllocateImageColormap(Image *image,
  const size_t colors)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forward to AcquireImageColormap(). */
  status=AcquireImageColormap(image,colors);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% Deprecated, replace with:
%
% AcquireNextImage(image_info,image);
%
% The format of the AllocateNextImage method is:
%
% void AllocateNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AllocateNextImage(const ImageInfo *image_info,Image *image)
{
  /* Deprecated alias: forward directly to AcquireNextImage(). */
  AcquireNextImage(image_info,image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateString() allocates memory for a string and copies the source string
% to that memory location (and returns it).
%
% The format of the AllocateString method is:
%
% char *AllocateString(const char *source)
%
% A description of each parameter follows:
%
% o source: A character string.
%
*/
MagickExport char *AllocateString(const char *source)
{
  char
    *destination;

  size_t
    length;

  /*
    Deprecated (last use: v5.5.7): return a freshly allocated copy of
    `source'.  The buffer is over-allocated by MaxTextExtent bytes to match
    the historical contract that callers may append in place.  `source' must
    not be NULL: the assertion enforces it, and strlen() below dereferences
    it unconditionally.  (The original code re-tested `source' for NULL
    *after* the strlen() dereference; that dead check has been removed.)
    On allocation failure a fatal exception is thrown.
  */
  assert(source != (const char *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  length=strlen(source)+MaxTextExtent+1;
  destination=(char *) AcquireQuantumMemory(length,sizeof(*destination));
  if (destination == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *destination='\0';
  (void) CopyMagickString(destination,source,length);
  return(destination);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AverageImages() takes a set of images and averages them together. Each
% image in the set must have the same width and height. AverageImages()
% returns a single image with each corresponding pixel component of each
% image averaged. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MeanEvaluateOperator,exception);
%
% The format of the AverageImages method is:
%
% Image *AverageImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AverageImages(const Image *images,ExceptionInfo *exception)
{
  Image
    *average_image;

  /* Deprecated alias: mean of the sequence via EvaluateImages(). */
  average_image=EvaluateImages(images,MeanEvaluateOperator,exception);
  return(average_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Extract a channel from the image. A channel is a particular color component
% of each pixel in the image.
%
% Deprecated, replace with:
%
% SeparateImageChannel(image,channel);
%
% The format of the ChannelImage method is:
%
% unsigned int ChannelImage(Image *image,const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, YellowChannel,
% or BlackChannel.
%
*/
MagickExport unsigned int ChannelImage(Image *image,const ChannelType channel)
{
  unsigned int
    status;

  /* Deprecated alias: forward to SeparateImageChannel(). */
  status=SeparateImageChannel(image,channel);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelThresholdImage() changes the value of individual pixels based on
% the intensity of each pixel channel. The result is a high-contrast image.
%
% The format of the ChannelThresholdImage method is:
%
% unsigned int ChannelThresholdImage(Image *image,const char *level)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o level: define the threshold values.
%
*/
MagickExport unsigned int ChannelThresholdImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    threshold;

  unsigned int
    flags,
    status;

  /*
    Deprecated (last use: v5.5.7): parse per-channel threshold levels from
    `level' (rho/sigma/xi map to red/green/blue; omitted values default to
    the red threshold) and bilevel each channel in turn.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
      (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),
        "last use: v5.5.7");
    }
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=(flags & SigmaValue) != 0 ? geometry_info.sigma :
    threshold.red;
  threshold.blue=(flags & XiValue) != 0 ? geometry_info.xi : threshold.red;
  status=BilevelImageChannel(image,RedChannel,threshold.red);
  status&=BilevelImageChannel(image,GreenChannel,threshold.green);
  status&=BilevelImageChannel(image,BlueChannel,threshold.blue);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPathImage() sets the image clip mask based any clipping path information
% if it exists.
%
% Deprecated, replace with:
%
% ClipImagePath(image,pathname,inside);
%
% The format of the ClipImage method is:
%
% MagickBooleanType ClipPathImage(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipPathImage(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forward to ClipImagePath(). */
  status=ClipImagePath(image,pathname,inside);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageAttributes() clones one or more image attributes.
%
% Deprecated, replace with:
%
% CloneImageProperties(image,clone_image);
%
% The format of the CloneImageAttributes method is:
%
% MagickBooleanType CloneImageAttributes(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageAttributes(Image *image,
  const Image *clone_image)
{
  MagickBooleanType
    status;

  /* Deprecated alias: forward to CloneImageProperties(). */
  status=CloneImageProperties(image,clone_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneMemory() copies size bytes from memory area source to the destination.
% Copying between objects that overlap will take place correctly. It returns
% destination.
%
% The format of the CloneMemory method is:
%
% void *CloneMemory(void *destination,const void *source,
% const size_t size)
%
% A description of each parameter follows:
%
% o destination: the destination.
%
% o source: the source.
%
% o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *CloneMemory(void *destination,const void *source,
  const size_t size)
{
  register const unsigned char
    *p;

  register unsigned char
    *q;

  register ssize_t
    i;

  /*
    Deprecated (last use: v5.5.7): memmove-style copy of `size' bytes that
    handles overlapping regions.  BUG FIX: the original overlap test,
    (p <= q) || ((p+size) >= q), is a tautology — if p > q then p+size > q —
    so the backward-copy branch was unreachable and the dangerous overlap
    (destination inside the tail of source) went through the forward path.
    A forward copy is safe only when the destination starts at or before the
    source, or past its end.
  */
  assert(destination != (void *) NULL);
  assert(source != (const void *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  p=(const unsigned char *) source;
  q=(unsigned char *) destination;
  if ((q <= p) || (q >= (p+size)))
    return(CopyMagickMemory(destination,source,size));
  /*
    p < q < p+size: a forward copy would read bytes already overwritten,
    so copy backwards.
  */
  p+=size;
  q+=size;
  for (i=(ssize_t) (size-1); i >= 0; i--)
    *--q=(*--p);
  return(destination);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o s e C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloseCacheView() closes the specified view returned by a previous call to
% OpenCacheView().
%
% Deprecated, replace with:
%
% DestroyCacheView(view_info);
%
% The format of the CloseCacheView method is:
%
% CacheView *CloseCacheView(CacheView *view_info)
%
% A description of each parameter follows:
%
% o view_info: the address of a structure of type CacheView.
%
*/
MagickExport CacheView *CloseCacheView(CacheView *view_info)
{
return(DestroyCacheView(view_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorFloodfill() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% The format of the ColorFloodfillImage method is:
%
% MagickBooleanType ColorFloodfillImage(Image *image,
% const DrawInfo *draw_info,const PixelPacket target,
% const ssize_t x_offset,const ssize_t y_offset,const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x,y: the starting location of the operation.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
/*
  Bounded stack of scan-line segments used by ColorFloodfillImage() below.
  Each entry records a horizontal span (left..right) on row `up` together
  with the direction `delta` in which scanning should continue.  Pushes
  whose continuation row (up+delta) falls outside the image are silently
  dropped; exhausting the fixed-size stack raises SegmentStackOverflow.
  Note: the macro expands references to the locals `s`, `segment_stack`
  and `image` of the enclosing function.
*/
#define MaxStacksize (1UL << 15)
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}
MagickExport MagickBooleanType ColorFloodfillImage(Image *image,
  const DrawInfo *draw_info,const PixelPacket target,const ssize_t x_offset,
  const ssize_t y_offset,const PaintMethod method)
{
  /*
    Deprecated scan-line flood fill.  Starting at (x_offset,y_offset),
    mark every connected pixel that matches `target` (FloodfillMethod) or
    that does not match it (FillToBorderMethod) in a scratch "floodplane"
    image, then composite the draw_info fill color onto the marked pixels
    of `image`.  Returns MagickTrue when every row synced cleanly.
  */
  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    The floodplane starts fully opaque; visited pixels are flagged by
    setting their opacity to TransparentOpacity.
  */
  floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    &image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  /*
    Set floodfill color.
  */
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  while (s > segment_stack)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels.
    */
    p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
    q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
      &image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    p+=x1;
    q+=x1;
    /*
      Scan left from x1 until a visited or non-matching pixel stops us.
    */
    for (x=x1; x >= 0; x--)
    {
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      if (method == FloodfillMethod)
        {
          if (IsColorSimilar(image,p,&target) == MagickFalse)
            break;
        }
      else
        if (IsColorSimilar(image,p,&target) != MagickFalse)
          break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
      break;
    /*
      skip means the left scan stopped immediately (x never moved past x1).
    */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              p=GetVirtualPixels(image,x,y,image->columns-x,1,
                &image->exception);
              q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
                &image->exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              /*
                Scan right, marking matching pixels as visited.
              */
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                if (method == FloodfillMethod)
                  {
                    if (IsColorSimilar(image,p,&target) == MagickFalse)
                      break;
                  }
                else
                  if (IsColorSimilar(image,p,&target) != MagickFalse)
                    break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
                break;
            }
          /*
            Queue the spans above/below the run just marked.
          */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Advance past the run of non-matching pixels inside the span.
          */
          p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            if (method == FloodfillMethod)
              {
                if (IsColorSimilar(image,p,&target) != MagickFalse)
                  break;
              }
            else
              if (IsColorSimilar(image,p,&target) == MagickFalse)
                break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  /*
    Composite the fill color onto every pixel the fill visited.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Tile fill color onto floodplane.
    */
    p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
      &image->exception);
    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          /* Fill color may vary per pixel (e.g. tiled or gradient fills). */
          (void) GetFillColor(draw_info,x,y,&fill_color);
          MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
            (MagickRealType) q->opacity,q);
        }
      p++;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s t i t u t e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteComponentGenesis() instantiates the constitute component.
%
% The format of the ConstituteComponentGenesis method is:
%
% MagickBooleanType ConstituteComponentGenesis(void)
%
*/
MagickExport MagickBooleanType ConstituteComponentGenesis(void)
{
  MagickBooleanType
    status;

  /*
    The constitute component keeps no private state, so instantiation
    always succeeds.
  */
  status=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s t i t u t e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteComponentTerminus() destroys the constitute component.
%
% The format of the ConstituteComponentTerminus method is:
%
% ConstituteComponentTerminus(void)
%
*/
MagickExport void ConstituteComponentTerminus(void)
{
  /* Intentionally empty: the constitute component owns no resources. */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageAttribute() deletes an attribute from the image.
%
% Deprecated, replace with:
%
% DeleteImageProperty(image,key);
%
% The format of the DeleteImageAttribute method is:
%
% MagickBooleanType DeleteImageAttribute(Image *image,const char *key)
%
% A description of each parameter follows:
%
% o image: the image info.
%
% o key: the image key.
%
*/
MagickExport MagickBooleanType DeleteImageAttribute(Image *image,
  const char *key)
{
  MagickBooleanType
    status;

  /*
    Deprecated thin wrapper: image "attributes" are stored as properties,
    so removal is forwarded to DeleteImageProperty().
  */
  status=DeleteImageProperty(image,key);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageList() deletes an image at the specified position in the list.
%
% The format of the DeleteImageList method is:
%
% unsigned int DeleteImageList(Image *images,const ssize_t offset)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
*/
MagickExport unsigned int DeleteImageList(Image *images,const ssize_t offset)
{
  register ssize_t
    step;

  /*
    Deprecated.  Delete the image at position `offset`, counted from the
    head of the list; returns MagickFalse when the list holds fewer than
    offset+1 images.
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  /*
    Rewind to the head of the list before walking forward.
  */
  while (GetPreviousImageInList(images) != (Image *) NULL)
    images=GetPreviousImageInList(images);
  step=0;
  while (step < offset)
  {
    Image
      *next;

    next=GetNextImageInList(images);
    if (next == (Image *) NULL)
      return(MagickFalse);
    images=next;
    step++;
  }
  DeleteImageFromList(&images);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteMagickRegistry() deletes an entry in the registry as defined by the id.
% It returns MagickTrue if the entry is deleted otherwise MagickFalse if no
% entry is found in the registry that matches the id.
%
% Deprecated, replace with:
%
% char key[MaxTextExtent];
% FormatLocaleString(key,MaxTextExtent,"%ld\n",id);
% DeleteImageRegistry(key);
%
% The format of the DeleteMagickRegistry method is:
%
% MagickBooleanType DeleteMagickRegistry(const ssize_t id)
%
% A description of each parameter follows:
%
% o id: the registry id.
%
*/
MagickExport MagickBooleanType DeleteMagickRegistry(const ssize_t id)
{
  char
    key[MaxTextExtent];

  MagickBooleanType
    status;

  /*
    Deprecated.  The modern registry is keyed by string: render the
    numeric id into the legacy "<id>\n" key format and delete that entry.
  */
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  status=DeleteImageRegistry(key);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C o n s t i t u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyConstitute() destroys the constitute component.
%
% The format of the DestroyConstitute method is:
%
% DestroyConstitute(void)
%
*/
MagickExport void DestroyConstitute(void)
{
  /* Deprecated no-op: the constitute component no longer owns any state. */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMagickRegistry() deallocates memory associated the magick registry.
%
% Deprecated, replace with:
%
% RegistryComponentTerminus();
%
% The format of the DestroyMagickRegistry method is:
%
% void DestroyMagickRegistry(void)
%
*/
MagickExport void DestroyMagickRegistry(void)
{
  /* Deprecated: forwards to the registry component's terminus routine. */
  RegistryComponentTerminus();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DescribeImage() describes an image by printing its attributes to the file.
% Attributes include the image width, height, size, and others.
%
% Deprecated, replace with:
%
% IdentifyImage(image,file,verbose);
%
% The format of the DescribeImage method is:
%
% MagickBooleanType DescribeImage(Image *image,FILE *file,
% const MagickBooleanType verbose)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o file: the file, typically stdout.
%
% o verbose: A value other than zero prints more detailed information
% about the image.
%
*/
MagickExport MagickBooleanType DescribeImage(Image *image,FILE *file,
  const MagickBooleanType verbose)
{
  MagickBooleanType
    status;

  /*
    Deprecated alias for IdentifyImage(): print the image attributes to
    the given stream, with extra detail when `verbose` is set.
  */
  status=IdentifyImage(image,file,verbose);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageAttributes() deallocates memory associated with the image
% attribute list.
%
% The format of the DestroyImageAttributes method is:
%
% DestroyImageAttributes(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageAttributes(Image *image)
{
  /*
    Release the splay-tree backing the image attribute list, if one was
    ever allocated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->attributes == (void *) NULL)
    return;
  image->attributes=(void *) DestroySplayTree((SplayTreeInfo *)
    image->attributes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImages() destroys an image list.
%
% Deprecated, replace with:
%
% DestroyImageList(image);
%
% The format of the DestroyImages method is:
%
% void DestroyImages(Image *image)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
*/
MagickExport void DestroyImages(Image *image)
{
  /*
    Deprecated alias for DestroyImageList(); a NULL sequence is silently
    ignored.
  */
  if (image == (Image *) NULL)
    return;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
  image=DestroyImageList(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a g i c k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMagick() destroys the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreTerminus();
%
% The format of the DestroyMagick function is:
%
% DestroyMagick(void)
%
*/
MagickExport void DestroyMagick(void)
{
  /* Deprecated: tear down the MagickCore environment. */
  MagickCoreTerminus();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s p a t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DispatchImage() extracts pixel data from an image and returns it to you.
% The method returns MagickTrue on success otherwise MagickFalse if an error
% is encountered.  The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% DispatchImage(image,0,0,640,1,"RGB",CharPixel,pixels,exception);
%
% Deprecated, replace with:
%
% ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels,
% exception);
%
% The format of the DispatchImage method is:
%
% unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,const size_t columns,
% const size_t rows,const char *map,const StorageType type,
% void *pixels,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset, y_offset, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha, C = cyan, Y = yellow, M = magenta, K = black, or
% I = intensity (for grayscale).
%
% o type: Define the data type of the pixels. Float and double types are
% normalized to [0..1] otherwise [0..QuantumRange]. Choose from these
% types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, or
% DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const char *map,const StorageType type,void *pixels,ExceptionInfo *exception)
{
  unsigned int
    status;

  /*
    Deprecated wrapper around ExportImagePixels(): copy the requested
    region into the caller-supplied buffer in the channel order given by
    `map`, converting to the storage `type`.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  status=ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,
    pixels,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t r a c t S u b i m a g e F r o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtractSubimageFromImage() extracts a region of the image that most
% closely resembles the reference.
%
% The format of the ExtractSubimageFromImage method is:
%
% Image *ExtractSubimageFromImage(const Image *image,
% const Image *reference,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const ssize_t x_offset,const ssize_t y_offset,
  const double similarity_threshold,ExceptionInfo *exception)
{
  /*
    Return a normalized RMS pixel difference between `reference` and the
    same-sized region of `image` anchored at (x_offset,y_offset).  The
    scan bails out early once the running value exceeds
    similarity_threshold (the caller's best match so far).
  */
  CacheView
    *image_view,
    *reference_view;

  double
    channels,                 /* number of channels compared: 3, 4 or 5 */
    normalized_similarity,
    similarity;               /* running sum of squared channel deltas */

  ssize_t
    y;

  /*
    Compute the similarity in pixels between two images.
  */
  normalized_similarity=1.0;
  similarity=0.0;
  channels=3;
  /* Opacity participates only when BOTH images have a matte channel. */
  if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
    channels++;
  /* Likewise the index (black) channel requires both images to be CMYK. */
  if ((image->colorspace == CMYKColorspace) &&
      (reference->colorspace == CMYKColorspace))
    channels++;
  image_view=AcquireVirtualCacheView(image,exception);
  reference_view=AcquireVirtualCacheView(reference,exception);
  for (y=0; y < (ssize_t) reference->rows; y++)
  {
    register const IndexPacket
      *indexes,
      *reference_indexes;

    register const PixelPacket
      *p,
      *q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset+y,
      reference->columns,1,exception);
    q=GetCacheViewVirtualPixels(reference_view,0,y,reference->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      continue;   /* unreadable row: skip it rather than fail outright */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reference_indexes=GetCacheViewVirtualIndexQueue(reference_view);
    for (x=0; x < (ssize_t) reference->columns; x++)
    {
      MagickRealType
        pixel;

      pixel=QuantumScale*(GetPixelRed(p)-(double)
        GetPixelRed(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelGreen(p)-(double)
        GetPixelGreen(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelBlue(p)-(double)
        GetPixelBlue(q));
      similarity+=pixel*pixel;
      if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
        {
          pixel=QuantumScale*(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          similarity+=pixel*pixel;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reference->colorspace == CMYKColorspace))
        {
          pixel=QuantumScale*(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reference_indexes+x));
          similarity+=pixel*pixel;
        }
      p++;
      q++;
    }
    /*
      Normalize by the reference area and channel count; sqrt() of the
      accumulated squared error yields an RMS-style metric.
    */
    normalized_similarity=sqrt(similarity)/reference->columns/reference->rows/
      channels;
    if (normalized_similarity > similarity_threshold)
      break;   /* already worse than the caller's best match */
  }
  reference_view=DestroyCacheView(reference_view);
  image_view=DestroyCacheView(image_view);
  return(normalized_similarity);
}
MagickExport Image *ExtractSubimageFromImage(Image *image,
  const Image *reference,ExceptionInfo *exception)
{
  /*
    Scan every placement of `reference` within `image`, track the offset
    with the smallest similarity metric, and return that region cropped
    out of `image` -- or NULL when no placement is close enough.
  */
  double
    similarity_threshold;

  RectangleInfo
    offset;

  ssize_t
    y;

  /*
    Extract reference from image.
  */
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    return((Image *) NULL);
  /* Seed the threshold with a value no real metric can exceed. */
  similarity_threshold=(double) image->columns*image->rows;
  /* presumably seeds offset.width/height from reference -- confirm */
  SetGeometry(reference,&offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    for (x=0; x < (ssize_t) (image->columns-reference->columns); x++)
    {
      /*
        NOTE(review): similarity_threshold is read here outside the
        critical section while other threads may update it below -- a
        data race on a double; results stay correct in practice because
        the threshold is only a pruning hint, but worth confirming.
      */
      similarity=GetSimilarityMetric(image,reference,x,y,similarity_threshold,
        exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ExtractSubimageFromImage)
#endif
      if (similarity < similarity_threshold)
        {
          similarity_threshold=similarity;
          offset.x=x;
          offset.y=y;
        }
    }
  }
  /* Reject the best match unless it beats the fuzz-derived cutoff. */
  if (similarity_threshold > (QuantumScale*reference->fuzz/100.0))
    return((Image *) NULL);
  return(CropImage(image,&offset,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l a t t e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlattenImages() Obsolete Function: Use MergeImageLayers() instead.
%
% Deprecated, replace with:
%
% MergeImageLayers(image,FlattenLayer,exception);
%
% The format of the FlattenImages method is:
%
%      Image *FlattenImages(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlattenImages(Image *image,ExceptionInfo *exception)
{
  Image
    *flattened;

  /*
    Obsolete: equivalent to merging the sequence with the FlattenLayer
    method.
  */
  flattened=MergeImageLayers(image,FlattenLayer,exception);
  return(flattened);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatImageAttribute() permits formatted key/value pairs to be saved as an
% image attribute.
%
% The format of the FormatImageAttribute method is:
%
% MagickBooleanType FormatImageAttribute(Image *image,const char *key,
% const char *format,...)
%
% A description of each parameter follows.
%
% o image: The image.
%
% o key: The attribute key.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport MagickBooleanType FormatImageAttributeList(Image *image,
  const char *key,const char *format,va_list operands)
{
  /*
    Format the va_list `operands` according to `format` and store the
    result as the image property named `key`.  Output is limited to
    MaxTextExtent bytes.
  */
  char
    value[MaxTextExtent];

  int
    n;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(value,MaxTextExtent,format,operands);
#else
  /* Fallback on platforms without vsnprintf(): vsprintf() is unbounded. */
  n=vsprintf(value,format,operands);
#endif
  if (n < 0)
    value[MaxTextExtent-1]='\0';   /* formatting failed: force termination */
  return(SetImageProperty(image,key,value));
}
MagickExport MagickBooleanType FormatImagePropertyList(Image *image,
  const char *property,const char *format,va_list operands)
{
  /*
    Format the va_list `operands` according to `format` and store the
    result as the named image property.  Output is limited to
    MaxTextExtent bytes.
  */
  char
    value[MaxTextExtent];

  int
    n;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(value,MaxTextExtent,format,operands);
#else
  /* Fallback on platforms without vsnprintf(): vsprintf() is unbounded. */
  n=vsprintf(value,format,operands);
#endif
  if (n < 0)
    value[MaxTextExtent-1]='\0';   /* formatting failed: force termination */
  return(SetImageProperty(image,property,value));
}
MagickExport MagickBooleanType FormatImageAttribute(Image *image,
  const char *key,const char *format,...)
{
  char
    value[MaxTextExtent];

  va_list
    argument_list;

  /*
    Render the printf-style arguments into a bounded buffer, then store
    the result as the image property named by `key`.
  */
  va_start(argument_list,format);
  (void) FormatLocaleStringList(value,MaxTextExtent,format,argument_list);
  va_end(argument_list);
  return(SetImageProperty(image,key,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t M a g i c k S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatMagickString() prints formatted output of a variable argument list.
%
% The format of the FormatMagickString method is:
%
% ssize_t FormatMagickString(char *string,const size_t length,
% const char *format,...)
%
% A description of each parameter follows.
%
% o string: FormatMagickString() returns the formatted string in this
% character buffer.
%
% o length: the maximum length of the string.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport ssize_t FormatMagickStringList(char *string,const size_t length,
  const char *format,va_list operands)
{
  /*
    vsnprintf-style workhorse for FormatMagickString(): write at most
    `length` bytes (including the terminator) into `string` and return
    the formatted length, or a negative value on error.
  */
  int
    n;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(string,length,format,operands);
#else
  /* Platform without vsnprintf(): vsprintf() cannot honor `length`. */
  n=vsprintf(string,format,operands);
#endif
  if (n < 0)
    string[length-1]='\0';   /* error: at least guarantee termination */
  return((ssize_t) n);
}
MagickExport ssize_t FormatMagickString(char *string,const size_t length,
  const char *format,...)
{
  ssize_t
    count;

  va_list
    argument_list;

  /*
    Variadic front-end for FormatMagickStringList().
  */
  va_start(argument_list,format);
  count=(ssize_t) FormatMagickStringList(string,length,format,argument_list);
  va_end(argument_list);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatString() prints formatted output of a variable argument list.
%
% The format of the FormatString method is:
%
% void FormatString(char *string,const char *format,...)
%
% A description of each parameter follows.
%
% o string: Method FormatString returns the formatted string in this
% character buffer.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport void FormatStringList(char *string,const char *format,
  va_list operands)
{
  /*
    Deprecated: format into `string`, which is assumed to hold at least
    MaxTextExtent bytes -- TODO confirm callers honor that contract.
  */
  int
    n;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(string,MaxTextExtent,format,operands);
#else
  /* Fallback on platforms without vsnprintf(): vsprintf() is unbounded. */
  n=vsprintf(string,format,operands);
#endif
  if (n < 0)
    string[MaxTextExtent-1]='\0';   /* formatting failed: force termination */
}
MagickExport void FormatString(char *string,const char *format,...)
{
  va_list
    argument_list;

  /*
    Deprecated variadic formatter: `string` must hold at least
    MaxTextExtent bytes.
  */
  va_start(argument_list,format);
  (void) FormatLocaleStringList(string,MaxTextExtent,format,argument_list);
  va_end(argument_list);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y C o l o r M a t c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyColorMatch() returns true if two pixels are identical in color.
%
% The format of the ColorMatch method is:
%
% void FuzzyColorMatch(const PixelPacket *p,const PixelPacket *q,
% const double fuzz)
%
% A description of each parameter follows:
%
% o p: Pixel p.
%
% o q: Pixel q.
%
% o distance: Define how much tolerance is acceptable to consider
% two colors as the same.
%
*/
MagickExport unsigned int FuzzyColorMatch(const PixelPacket *p,
  const PixelPacket *q,const double fuzz)
{
  MagickPixelPacket
    difference;

  register MagickRealType
    squared_distance;

  /*
    Exact comparison short-circuits the arithmetic when no tolerance is
    requested.
  */
  if ((fuzz == 0.0) && (GetPixelRed(p) == GetPixelRed(q)) &&
      (GetPixelGreen(p) == GetPixelGreen(q)) &&
      (GetPixelBlue(p) == GetPixelBlue(q)))
    return(MagickTrue);
  /*
    Accumulate squared channel distances, rejecting as soon as the
    running total exceeds fuzz squared.
  */
  difference.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
  squared_distance=difference.red*difference.red;
  if (squared_distance > (fuzz*fuzz))
    return(MagickFalse);
  difference.green=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
  squared_distance+=difference.green*difference.green;
  if (squared_distance > (fuzz*fuzz))
    return(MagickFalse);
  difference.blue=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
  squared_distance+=difference.blue*difference.blue;
  if (squared_distance > (fuzz*fuzz))
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y C o l o r C o m p a r e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyColorCompare() returns MagickTrue if the distance between two colors is
% less than the specified distance in a linear three dimensional color space.
% This method is used by ColorFloodFill() and other algorithms which
% compare two colors.
%
% The format of the FuzzyColorCompare method is:
%
% void FuzzyColorCompare(const Image *image,const PixelPacket *p,
% const PixelPacket *q)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyColorCompare(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  MagickBooleanType
    status;

  /*
    Deprecated alias for IsColorSimilar(): compare two pixel colors
    within the image's fuzz tolerance.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
  status=IsColorSimilar(image,p,q);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y O p a c i t y C o m p a r e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyOpacityCompare() returns true if the distance between two opacity
% values is less than the specified distance in a linear color space. This
% method is used by MatteFloodFill() and other algorithms which compare
% two opacity values.
%
% Deprecated, replace with:
%
% IsOpacitySimilar(image,p,q);
%
% The format of the FuzzyOpacityCompare method is:
%
% void FuzzyOpacityCompare(const Image *image,const PixelPacket *p,
% const PixelPacket *q)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyOpacityCompare(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  MagickBooleanType
    status;

  /*
    Deprecated alias for IsOpacitySimilar(): compare two opacity values
    within the image's fuzz tolerance.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
  status=IsOpacitySimilar(image,p,q);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C o n f i g u r e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetConfigureBlob() returns the specified configure file as a blob.
%
% The format of the GetConfigureBlob method is:
%
% void *GetConfigureBlob(const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o filename: the configure file name.
%
% o path: return the full path information of the configure file.
%
% o length: This pointer to a size_t integer sets the initial length of the
% blob. On return, it reflects the actual length of the blob.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetConfigureBlob(const char *filename,char *path,
  size_t *length,ExceptionInfo *exception)
{
  /*
    Locate the configuration file `filename` by probing a series of
    well-known locations and return its contents as a blob.  `path`
    receives the full path actually probed/used and `length` the blob
    size.  Returns NULL (after raising a ConfigureWarning) when the file
    cannot be found.
  */
  void
    *blob;

  assert(filename != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(path != (char *) NULL);
  assert(length != (size_t *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  blob=(void *) NULL;
  /* Default `path` to the bare filename for the current-directory probe. */
  (void) CopyMagickString(path,filename,MaxTextExtent);
#if defined(MAGICKCORE_INSTALLED_SUPPORT)
#if defined(MAGICKCORE_LIBRARY_PATH)
  if (blob == (void *) NULL)
    {
      /*
        Search hard coded paths.
      */
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s",
        MAGICKCORE_LIBRARY_PATH,filename);
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
#endif
#if defined(MAGICKCORE_WINDOWS_SUPPORT) && !(defined(MAGICKCORE_CONFIGURE_PATH) || defined(MAGICKCORE_SHARE_PATH))
  if (blob == (void *) NULL)
    {
      unsigned char
        *key_value;

      /*
        Locate file via registry key.
      */
      key_value=NTRegistryKeyLookup("ConfigurePath");
      if (key_value != (unsigned char *) NULL)
        {
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",(char *)
            key_value,DirectorySeparator,filename);
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
        }
    }
#endif
#else
  /*
    Uninstalled (build-tree) configuration: probe environment variables,
    the executable's directory, the current directory, and finally the
    Windows resource section.
  */
  if (blob == (void *) NULL)
    {
      char
        *home;

      home=GetEnvironmentValue("MAGICK_HOME");
      if (home != (char *) NULL)
        {
          /*
            Search MAGICK_HOME.
          */
#if !defined(MAGICKCORE_POSIX_SUPPORT)
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home,
            DirectorySeparator,filename);
#else
          (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home,
            MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
      home=GetEnvironmentValue("HOME");
      if (home == (char *) NULL)
        home=GetEnvironmentValue("USERPROFILE");
      if (home != (char *) NULL)
        {
          /*
            Search $HOME/.magick.
          */
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home,
            DirectorySeparator,DirectorySeparator,filename);
          if ((IsPathAccessible(path) != MagickFalse) && (blob == (void *) NULL))
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
    }
  if ((blob == (void *) NULL) && (*GetClientPath() != '\0'))
    {
#if !defined(MAGICKCORE_POSIX_SUPPORT)
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(),
        DirectorySeparator,filename);
#else
      char
        prefix[MaxTextExtent];

      /*
        Search based on executable directory if directory is known.
      */
      (void) CopyMagickString(prefix,GetClientPath(),
        MaxTextExtent);
      ChopPathComponents(prefix,1);
      (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",prefix,
        MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
  /*
    Search current directory.
  */
  if ((blob == (void *) NULL) && (IsPathAccessible(path) != MagickFalse))
    blob=FileToBlob(path,~0UL,length,exception);
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
  /*
    Search Windows registry.
  */
  if (blob == (void *) NULL)
    blob=NTResourceToBlob(filename);
#endif
#endif
  if (blob == (void *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ConfigureWarning,
      "UnableToOpenConfigureFile","`%s'",path);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheView() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned if
% the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the GetCacheView method is:
%
% PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the address of a structure of type CacheView.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /*
    Deprecated shim: forward straight to GetCacheViewAuthenticPixels(),
    reporting errors through the view's own exception structure.
  */
  return(GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheViewIndexes() returns the indexes associated with the specified
% view.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticIndexQueue(cache_view);
%
% The format of the GetCacheViewIndexes method is:
%
% IndexPacket *GetCacheViewIndexes(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport IndexPacket *GetCacheViewIndexes(CacheView *cache_view)
{
  IndexPacket
    *indexes;

  /*
    Deprecated shim: forward to GetCacheViewAuthenticIndexQueue().
  */
  indexes=GetCacheViewAuthenticIndexQueue(cache_view);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned if
% the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the GetCacheViewPixels method is:
%
% PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /*
    Deprecated shim: identical to GetCacheView(); forward to
    GetCacheViewAuthenticPixels() with the view's exception structure.
  */
  return(GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAttribute() searches the list of image attributes and returns
% a pointer to the attribute if it exists otherwise NULL.
%
% The format of the GetImageAttribute method is:
%
% const ImageAttribute *GetImageAttribute(const Image *image,
% const char *key)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: These character strings are the name of an image attribute to
% return.
%
*/
static void *DestroyAttribute(void *attribute)
{
register ImageAttribute
*p;
p=(ImageAttribute *) attribute;
if (p->value != (char *) NULL)
p->value=DestroyString(p->value);
return(RelinquishMagickMemory(p));
}
MagickExport const ImageAttribute *GetImageAttribute(const Image *image,
  const char *key)
{
  const char
    *value;

  ImageAttribute
    *attribute;

  /*
    Deprecated: image attributes are stored as image properties nowadays.
    Look the value up as a property, then cache it as an ImageAttribute in
    the image's splay-tree so callers receive a stable pointer.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  value=GetImageProperty(image,key);
  if (value == (const char *) NULL)
    return((const ImageAttribute *) NULL);
  if (image->attributes != (void *) NULL)
    {
      const ImageAttribute
        *cached;

      /* Return any previously cached attribute for this key. */
      cached=(const ImageAttribute *) GetValueFromSplayTree((SplayTreeInfo *)
        image->attributes,key);
      if (cached != (const ImageAttribute *) NULL)
        return(cached);
    }
  else
    ((Image *) image)->attributes=NewSplayTree(CompareSplayTreeString,
      RelinquishMagickMemory,DestroyAttribute);
  /* Not cached yet: allocate, populate, and register a new attribute. */
  attribute=(ImageAttribute *) AcquireMagickMemory(sizeof(*attribute));
  if (attribute == (ImageAttribute *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(attribute,0,sizeof(*attribute));
  attribute->key=ConstantString(key);
  attribute->value=ConstantString(value);
  (void) AddValueToSplayTree((SplayTreeInfo *) ((Image *) image)->attributes,
    attribute->key,attribute);
  return((const ImageAttribute *) attribute);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p p i n g P a t h A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClippingPathAttribute() searches the list of image attributes and
% returns a pointer to a clipping path if it exists otherwise NULL.
%
% Deprecated, replace with:
%
% GetImageAttribute(image,"8BIM:1999,2998");
%
% The format of the GetImageClippingPathAttribute method is:
%
% const ImageAttribute *GetImageClippingPathAttribute(Image *image)
%
% A description of each parameter follows:
%
% o attribute: Method GetImageClippingPathAttribute returns the clipping
% path if it exists otherwise NULL.
%
% o image: the image.
%
*/
MagickExport const ImageAttribute *GetImageClippingPathAttribute(Image *image)
{
  const ImageAttribute
    *clip_path;

  /*
    Deprecated shim: the clipping path is stored under the 8BIM profile
    attribute key.
  */
  clip_path=GetImageAttribute(image,"8BIM:1999,2998");
  return(clip_path);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F r o m M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFromMagickRegistry() gets an image from the registry as defined by
% its name. If the image is not found, a NULL image is returned.
%
% Deprecated, replace with:
%
% GetImageRegistry(ImageRegistryType,name,exception);
%
% The format of the GetImageFromMagickRegistry method is:
%
% Image *GetImageFromMagickRegistry(const char *name,ssize_t *id,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o name: the name of the image to retrieve from the registry.
%
% o id: the registry id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageFromMagickRegistry(const char *name,ssize_t *id,
  ExceptionInfo *exception)
{
  Image
    *image;

  /*
    Deprecated shim: numeric registry ids are obsolete, so always report 0
    and look the image up by name in the modern registry.
  */
  *id=0L;
  image=(Image *) GetImageRegistry(ImageRegistryType,name,exception);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickRegistry() gets a blob from the registry as defined by the id. If
% the blob that matches the id is not found, NULL is returned.
%
% The format of the GetMagickRegistry method is:
%
% const void *GetMagickRegistry(const ssize_t id,RegistryType *type,
% size_t *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o id: the registry id.
%
% o type: the registry type.
%
% o length: the blob length in number of bytes.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetMagickRegistry(const ssize_t id,RegistryType *type,
  size_t *length,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent];

  void
    *blob;

  /*
    Deprecated: probe each registry partition in turn for the given id and
    return the first match (or the result of the last probe).
    NOTE(review): the key format string keeps a trailing '\n'; presumably it
    matches the key format used when the entry was registered -- confirm
    against the corresponding Set routine before changing it.
  */
  *type=UndefinedRegistryType;
  *length=0;
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  blob=(void *) GetImageRegistry(ImageRegistryType,key,exception);
  if (blob == (void *) NULL)
    blob=(void *) GetImageRegistry(ImageInfoRegistryType,key,exception);
  if (blob == (void *) NULL)
    blob=(void *) GetImageRegistry(UndefinedRegistryType,key,exception);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageGeometry() returns a region as defined by the geometry string with
% respect to the image and its gravity.
%
% Deprecated, replace with:
%
% if (size_to_fit != MagickFalse)
% ParseRegionGeometry(image,geometry,region_info,&image->exception); else
% ParsePageGeometry(image,geometry,region_info,&image->exception);
%
% The format of the GetImageGeometry method is:
%
% int GetImageGeometry(Image *image,const char *geometry,
%        const unsigned int size_to_fit,RectangleInfo *region_info)
%
% A description of each parameter follows:
%
% o flags: Method GetImageGeometry returns a bitmask that indicates
% which of the four values were located in the geometry string.
%
% o geometry: The geometry (e.g. 100x100+10+10).
%
% o size_to_fit: A value other than 0 means to scale the region so it
% fits within the specified width and height.
%
% o region_info: the region as defined by the geometry string with
% respect to the image and its gravity.
%
*/
MagickExport int GetImageGeometry(Image *image,const char *geometry,
  const unsigned int size_to_fit,RectangleInfo *region_info)
{
  /*
    Deprecated shim: dispatch to the modern geometry parsers.  A non-zero
    size_to_fit selects region (scale-to-fit) parsing, zero selects page
    parsing.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.4");
  if (size_to_fit == 0)
    return((int) ParsePageGeometry(image,geometry,region_info,
      &image->exception));
  return((int) ParseRegionGeometry(image,geometry,region_info,
    &image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageList() returns an image at the specified position in the list.
%
% Deprecated, replace with:
%
% CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
% exception);
%
% The format of the GetImageList method is:
%
% Image *GetImageList(const Image *images,const ssize_t offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageList(const Image *images,const ssize_t offset,
  ExceptionInfo *exception)
{
  /*
    Deprecated shim: clone the image found at the given list offset.
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  return(CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageListIndex() returns the position in the list of the specified
% image.
%
% Deprecated, replace with:
%
% GetImageIndexInList(images);
%
% The format of the GetImageListIndex method is:
%
% ssize_t GetImageListIndex(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport ssize_t GetImageListIndex(const Image *images)
{
  ssize_t
    index;

  /*
    Deprecated shim: forward to GetImageIndexInList().
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  index=GetImageIndexInList(images);
  return(index);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageListSize() returns the number of images in the list.
%
% Deprecated, replace with:
%
% GetImageListLength(images);
%
% The format of the GetImageListSize method is:
%
% size_t GetImageListSize(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport size_t GetImageListSize(const Image *images)
{
  size_t
    length;

  /*
    Deprecated shim: forward to GetImageListLength().
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  length=GetImageListLength(images);
  return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in RAM, or in a memory-mapped file. The returned pointer
% should *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseduoClass, call GetAuthenticIndexQueue() after invoking GetImagePixels()
% to obtain the black color component or colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% Deprecated, replace with:
%
% GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
% The format of the GetImagePixels() method is:
%
% PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  /*
    Deprecated shim: forward to GetAuthenticPixels(), reporting errors via
    the image's own exception structure.
  */
  pixels=GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetIndexes() returns the black channel or the colormap indexes associated
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the black channel or colormap indexes are not available.
%
% Deprecated, replace with:
%
% GetAuthenticIndexQueue(image);
%
% The format of the GetIndexes() method is:
%
% IndexPacket *GetIndexes(const Image *image)
%
% A description of each parameter follows:
%
% o indexes: GetIndexes() returns the indexes associated with the last
% call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetIndexes(const Image *image)
{
  IndexPacket
    *indexes;

  /*
    Deprecated shim: forward to GetAuthenticIndexQueue().
  */
  indexes=GetAuthenticIndexQueue(image);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t M a g i c k G e o m e t r y                                         %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickGeometry() is similar to GetGeometry() except the returned
% geometry is modified as determined by the meta characters: %, !, <, >,
% and ~.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,x,y,width,height);
%
% The format of the GetMagickGeometry method is:
%
% unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,ssize_t *y,
% size_t *width,size_t *height)
%
% A description of each parameter follows:
%
% o geometry: Specifies a character string representing the geometry
% specification.
%
% o x,y: A pointer to an integer. The x and y offset as determined by
% the geometry specification is returned here.
%
% o width,height: A pointer to an unsigned integer. The width and height
% as determined by the geometry specification is returned here.
%
*/
MagickExport unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,
  ssize_t *y,size_t *width,size_t *height)
{
  unsigned int
    flags;

  /*
    Deprecated shim: forward to ParseMetaGeometry(), which honors the
    %, !, <, > and ~ geometry meta characters.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
  flags=ParseMetaGeometry(geometry,x,y,width,height);
  return(flags);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImage() returns the next image in a list.
%
% Deprecated, replace with:
%
% GetNextImageInList(images);
%
% The format of the GetNextImage method is:
%
% Image *GetNextImage(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *GetNextImage(const Image *images)
{
  Image
    *next;

  /*
    Deprecated shim: forward to GetNextImageInList().
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  next=GetNextImageInList(images);
  return(next);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageAttribute() gets the next image attribute.
%
% Deprecated, replace with:
%
% const char *property;
% property=GetNextImageProperty(image);
% if (property != (const char *) NULL)
% GetImageAttribute(image,property);
%
% The format of the GetNextImageAttribute method is:
%
% const ImageAttribute *GetNextImageAttribute(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const ImageAttribute *GetNextImageAttribute(const Image *image)
{
  const char
    *property;

  /*
    Deprecated shim: advance the image property iterator and wrap the next
    property (if any) as an ImageAttribute.
  */
  property=GetNextImageProperty(image);
  if (property != (const char *) NULL)
    return(GetImageAttribute(image,property));
  return((const ImageAttribute *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N u m b e r S c e n e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNumberScenes() returns the number of images in the list.
%
% Deprecated, replace with:
%
% GetImageListLength(image);
%
% The format of the GetNumberScenes method is:
%
% unsigned int GetNumberScenes(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport unsigned int GetNumberScenes(const Image *image)
{
  unsigned int
    number_scenes;

  /*
    Deprecated shim: forward to GetImageListLength(), narrowing the result
    to unsigned int for the legacy interface.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  number_scenes=(unsigned int) GetImageListLength(image);
  return(number_scenes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOnePixel() returns a single pixel at the specified (x,y) location.
% The image background color is returned if an error occurs.
%
% Deprecated, replace with:
%
% GetOneAuthenticPixel(image,x,y,&pixel,&image->exception);
%
% The format of the GetOnePixel() method is:
%
%      PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
*/
MagickExport PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
{
  PixelPacket
    one_pixel;

  /*
    Deprecated shim: forward to GetOneAuthenticPixel().  Its status return
    is intentionally ignored; on failure the pixel holds the image
    background color.
  */
  (void) GetOneAuthenticPixel(image,x,y,&one_pixel,&image->exception);
  return(one_pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixels() returns the pixels associated with the last call to
% QueueAuthenticPixels() or GetAuthenticPixels().
%
% Deprecated, replace with:
%
% GetAuthenticPixelQueue(image);
%
% The format of the GetPixels() method is:
%
%      PixelPacket *GetPixels(const Image *image)
%
% A description of each parameter follows:
%
% o pixels: GetPixels() returns the pixels associated with the last call
% to QueueAuthenticPixels() or GetAuthenticPixels().
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetPixels(const Image *image)
{
  PixelPacket
    *pixels;

  /*
    Deprecated shim: forward to GetAuthenticPixelQueue().
  */
  pixels=GetAuthenticPixelQueue(image);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P r e v i o u s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPreviousImage() returns the previous image in a list.
%
% Deprecated, replace with:
%
%      GetPreviousImageInList(images);
%
% The format of the GetPreviousImage method is:
%
% Image *GetPreviousImage(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *GetPreviousImage(const Image *images)
{
  Image
    *previous;

  /*
    Deprecated shim: forward to GetPreviousImageInList().
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  previous=GetPreviousImageInList(images);
  return(previous);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H S L T r a n s f o r m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HSLTransform() converts a (hue, saturation, lightness) to a (red, green,
% blue) triple.
%
% The format of the HSLTransformImage method is:
%
% void HSLTransform(const double hue,const double saturation,
% const double lightness,Quantum *red,Quantum *green,Quantum *blue)
%
% A description of each parameter follows:
%
% o hue, saturation, lightness: A double value representing a
% component of the HSL color space.
%
% o red, green, blue: A pointer to a pixel component of type Quantum.
%
*/
static inline MagickRealType HueToRGB(MagickRealType m1,MagickRealType m2,
  MagickRealType hue)
{
  /*
    HSL helper: convert a hue offset into one RGB channel value by linear
    interpolation between m1 and m2.  The hue is first wrapped into [0,1],
    then evaluated on the standard piecewise hue ramp: rising edge below
    1/6, plateau at m2 up to 1/2, falling edge up to 2/3, floor m1 beyond.
  */
  if (hue < 0.0)
    hue+=1.0;
  if (hue > 1.0)
    hue-=1.0;
  if ((6.0*hue) < 1.0)
    return(m1+6.0*(m2-m1)*hue);
  if ((2.0*hue) < 1.0)
    return(m2);
  if ((3.0*hue) < 2.0)
    return(m1+6.0*(m2-m1)*(2.0/3.0-hue));
  return(m1);
}
MagickExport void HSLTransform(const double hue,const double saturation,
  const double lightness,Quantum *red,Quantum *green,Quantum *blue)
{
  MagickRealType
    m1,
    m2;

  /*
    Convert an HSL triple to RGB quantum components.  m2 is the upper
    interpolation magnitude, m1 the lower; each channel samples the hue
    ramp at an offset of +1/3 (red), 0 (green) and -1/3 (blue).
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  m2=(lightness <= 0.5) ? lightness*(saturation+1.0) :
    lightness+saturation-lightness*saturation;
  m1=2.0*lightness-m2;
  *red=ClampToQuantum((MagickRealType) QuantumRange*
    HueToRGB(m1,m2,hue+1.0/3.0));
  *green=ClampToQuantum((MagickRealType) QuantumRange*
    HueToRGB(m1,m2,hue));
  *blue=ClampToQuantum((MagickRealType) QuantumRange*
    HueToRGB(m1,m2,hue-1.0/3.0));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i t y A f f i n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentityAffine() initializes the affine transform to the identity matrix.
%
% The format of the IdentityAffine method is:
%
% IdentityAffine(AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o affine: A pointer the affine transform of type AffineMatrix.
%
*/
MagickExport void IdentityAffine(AffineMatrix *affine)
{
  /*
    Deprecated: zero the whole affine transform, then set the diagonal
    scale factors to one, yielding the identity matrix.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(affine != (AffineMatrix *) NULL);
  (void) ResetMagickMemory(affine,0,sizeof(AffineMatrix));
  affine->sy=1.0;
  affine->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n i t i a l i z e M a g i c k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeMagick() initializes the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreGenesis(path,MagickFalse);
%
% The format of the InitializeMagick function is:
%
% InitializeMagick(const char *path)
%
% A description of each parameter follows:
%
% o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void InitializeMagick(const char *path)
{
  /*
    Deprecated shim: forward to MagickCoreGenesis() with a MagickFalse
    second argument, as documented in the banner above.
  */
  MagickCoreGenesis(path,MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelColor() applies bi-linear or tri-linear interpolation
% between a pixel and it's neighbors.
%
% The format of the InterpolatePixelColor method is:
%
% MagickPixelPacket InterpolatePixelColor(const Image *image,
% CacheView *view_info,InterpolatePixelMethod method,const double x,
% const double y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o image_view: the image cache view.
%
% o type: the type of pixel color interpolation.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickMax(const double x,const double y)
{
  /* Return the larger of two doubles (y when they compare equal or unordered). */
  return((x > y) ? x : y);
}
static void BicubicInterpolate(const MagickPixelPacket *pixels,const double dx,
MagickPixelPacket *pixel)
{
MagickRealType
dx2,
p,
q,
r,
s;
dx2=dx*dx;
p=(pixels[3].red-pixels[2].red)-(pixels[0].red-pixels[1].red);
q=(pixels[0].red-pixels[1].red)-p;
r=pixels[2].red-pixels[0].red;
s=pixels[1].red;
pixel->red=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].green-pixels[2].green)-(pixels[0].green-pixels[1].green);
q=(pixels[0].green-pixels[1].green)-p;
r=pixels[2].green-pixels[0].green;
s=pixels[1].green;
pixel->green=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].blue-pixels[2].blue)-(pixels[0].blue-pixels[1].blue);
q=(pixels[0].blue-pixels[1].blue)-p;
r=pixels[2].blue-pixels[0].blue;
s=pixels[1].blue;
pixel->blue=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].opacity-pixels[2].opacity)-(pixels[0].opacity-pixels[1].opacity);
q=(pixels[0].opacity-pixels[1].opacity)-p;
r=pixels[2].opacity-pixels[0].opacity;
s=pixels[1].opacity;
pixel->opacity=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
if (pixel->colorspace == CMYKColorspace)
{
p=(pixels[3].index-pixels[2].index)-(pixels[0].index-pixels[1].index);
q=(pixels[0].index-pixels[1].index)-p;
r=pixels[2].index-pixels[0].index;
s=pixels[1].index;
pixel->index=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
}
}
static inline MagickRealType CubicWeightingFunction(const MagickRealType x)
{
MagickRealType
alpha,
gamma;
alpha=MagickMax(x+2.0,0.0);
gamma=1.0*alpha*alpha*alpha;
alpha=MagickMax(x+1.0,0.0);
gamma-=4.0*alpha*alpha*alpha;
alpha=MagickMax(x+0.0,0.0);
gamma+=6.0*alpha*alpha*alpha;
alpha=MagickMax(x-1.0,0.0);
gamma-=4.0*alpha*alpha*alpha;
return(gamma/6.0);
}
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  /*
    Barycentric blend over a mesh triangle: weight x by delta->x, y by
    delta->y, and the apex sample p by the remaining weight.
  */
  const double
    apex_weight=1.0-delta->x-delta->y;

  return(delta->x*x+delta->y*y+apex_weight*p);
}
static inline ssize_t NearestNeighbor(MagickRealType x)
{
  /* Round to the nearest integer, halves rounded away from zero. */
  if (x < 0.0)
    return((ssize_t) (x-0.5));
  return((ssize_t) (x+0.5));
}
MagickExport MagickPixelPacket InterpolatePixelColor(const Image *image,
  CacheView *image_view,const InterpolatePixelMethod method,const double x,
  const double y,ExceptionInfo *exception)
{
  /*
    Return the color of image at floating-point coordinate (x,y),
    interpolated from neighboring pixels with the requested method.  When
    the image has a matte channel, channels are alpha-premultiplied before
    blending and de-premultiplied afterwards via PerceptibleReciprocal().

    Bug fix: the BicubicInterpolatePixel case previously re-initialized
    pixels[4*i] (via GetMagickPixelPacket) after the 4x4 neighborhood had
    been fetched, zeroing the first pixel of each row before it was read by
    BicubicInterpolate().  That reset has been removed.
  */
  MagickPixelPacket
    pixel;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image_view != (CacheView *) NULL);
  GetMagickPixelPacket(image,&pixel);
  switch (method)
  {
    case AverageInterpolatePixel:
    {
      /*
        Uniform average of the 4x4 neighborhood (weight 1/16 per pixel).
      */
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16];

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 16L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        gamma=alpha[i];
        gamma=PerceptibleReciprocal(gamma);
        pixel.red+=gamma*0.0625*pixels[i].red;
        pixel.green+=gamma*0.0625*pixels[i].green;
        pixel.blue+=gamma*0.0625*pixels[i].blue;
        pixel.opacity+=0.0625*pixels[i].opacity;  /* opacity is not alpha-weighted */
        if (image->colorspace == CMYKColorspace)
          pixel.index+=gamma*0.0625*pixels[i].index;
        p++;
      }
      break;
    }
    case BicubicInterpolatePixel:
    {
      /*
        Bicubic: interpolate each of the four rows horizontally into u[],
        then blend the four row results vertically.
      */
      MagickPixelPacket
        pixels[16],
        u[4];

      MagickRealType
        alpha[16];

      PointInfo
        delta;

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
        GetMagickPixelPacket(image,u+i);
      for (i=0; i < 16L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      /*
        Do NOT re-initialize pixels[] here: it already holds the fetched
        neighborhood that BicubicInterpolate() must read.
      */
      for (i=0; i < 4L; i++)
        BicubicInterpolate(pixels+4*i,delta.x,u+i);
      delta.y=y-floor(y);
      BicubicInterpolate(u,delta.y,&pixel);
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      /*
        Bilinear blend of the 2x2 neighborhood; gamma normalizes by the
        bilinearly blended alpha.
      */
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16];

      PointInfo
        delta;

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),2,2,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      gamma=(((1.0-delta.y)*((1.0-delta.x)*alpha[0]+delta.x*alpha[1])+delta.y*
        ((1.0-delta.x)*alpha[2]+delta.x*alpha[3])));
      gamma=PerceptibleReciprocal(gamma);
      pixel.red=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].red+delta.x*
        pixels[1].red)+delta.y*((1.0-delta.x)*pixels[2].red+delta.x*
        pixels[3].red));
      pixel.green=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].green+delta.x*
        pixels[1].green)+delta.y*((1.0-delta.x)*pixels[2].green+
        delta.x*pixels[3].green));
      pixel.blue=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].blue+delta.x*
        pixels[1].blue)+delta.y*((1.0-delta.x)*pixels[2].blue+delta.x*
        pixels[3].blue));
      pixel.opacity=((1.0-delta.y)*((1.0-delta.x)*pixels[0].opacity+delta.x*
        pixels[1].opacity)+delta.y*((1.0-delta.x)*pixels[2].opacity+delta.x*
        pixels[3].opacity));
      if (image->colorspace == CMYKColorspace)
        pixel.index=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].index+delta.x*
          pixels[1].index)+delta.y*((1.0-delta.x)*pixels[2].index+delta.x*
          pixels[3].index));
      break;
    }
    case FilterInterpolatePixel:
    {
      /*
        Resize a 4x4 excerpt down to a single pixel with the image's
        configured resize filter, and return that pixel.
      */
      Image
        *excerpt_image,
        *filter_image;

      MagickPixelPacket
        pixels[1];

      RectangleInfo
        geometry;

      geometry.width=4L;
      geometry.height=4L;
      geometry.x=(ssize_t) floor(x)-1L;
      geometry.y=(ssize_t) floor(y)-1L;
      excerpt_image=ExcerptImage(image,&geometry,exception);
      if (excerpt_image == (Image *) NULL)
        break;
      filter_image=ResizeImage(excerpt_image,1,1,image->filter,image->blur,
        exception);
      excerpt_image=DestroyImage(excerpt_image);
      if (filter_image == (Image *) NULL)
        break;
      p=GetVirtualPixels(filter_image,0,0,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          filter_image=DestroyImage(filter_image);
          break;
        }
      indexes=GetVirtualIndexQueue(filter_image);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      filter_image=DestroyImage(filter_image);
      break;
    }
    case IntegerInterpolatePixel:
    {
      /*
        Truncate (floor) to the containing pixel.
      */
      MagickPixelPacket
        pixels[1];

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),1,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      break;
    }
    case MeshInterpolatePixel:
    {
      /*
        Split the 2x2 cell into two triangles along the diagonal with the
        smaller luminance difference, then blend within the triangle that
        contains (delta.x,delta.y).
      */
      double
        gamma;

      MagickPixelPacket
        pixels[4];

      MagickRealType
        alpha[4];

      PointInfo
        delta,
        luminance;

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),2,2,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      luminance.x=MagickPixelLuma(pixels+0)-MagickPixelLuma(pixels+3);
      luminance.y=MagickPixelLuma(pixels+1)-MagickPixelLuma(pixels+2);
      if (fabs(luminance.x) < fabs(luminance.y))
        {
          /*
            Diagonal 0-3 NW-SE.
          */
          if (delta.x <= delta.y)
            {
              /*
                Bottom-left triangle (pixel:2, diagonal: 0-3).
              */
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[2].red,
                pixels[3].red,pixels[0].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[2].green,
                pixels[3].green,pixels[0].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[2].blue,
                pixels[3].blue,pixels[0].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[2].opacity,
                pixels[3].opacity,pixels[0].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[2].index,
                  pixels[3].index,pixels[0].index);
            }
          else
            {
              /*
                Top-right triangle (pixel:1, diagonal: 0-3).
              */
              delta.x=1.0-delta.x;
              gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[1].red,
                pixels[0].red,pixels[3].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[1].green,
                pixels[0].green,pixels[3].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[1].blue,
                pixels[0].blue,pixels[3].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[1].opacity,
                pixels[0].opacity,pixels[3].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[1].index,
                  pixels[0].index,pixels[3].index);
            }
        }
      else
        {
          /*
            Diagonal 1-2 NE-SW.
          */
          if (delta.x <= (1.0-delta.y))
            {
              /*
                Top-left triangle (pixel 0, diagonal: 1-2).
              */
              gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[0].red,
                pixels[1].red,pixels[2].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[0].green,
                pixels[1].green,pixels[2].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[0].blue,
                pixels[1].blue,pixels[2].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[0].opacity,
                pixels[1].opacity,pixels[2].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[0].index,
                  pixels[1].index,pixels[2].index);
            }
          else
            {
              /*
                Bottom-right triangle (pixel: 3, diagonal: 1-2).
              */
              delta.x=1.0-delta.x;
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[3].red,
                pixels[2].red,pixels[1].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[3].green,
                pixels[2].green,pixels[1].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[3].blue,
                pixels[2].blue,pixels[1].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[3].opacity,
                pixels[2].opacity,pixels[1].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[3].index,
                  pixels[2].index,pixels[1].index);
            }
        }
      break;
    }
    case NearestNeighborInterpolatePixel:
    {
      /*
        Round to the nearest pixel center.
      */
      MagickPixelPacket
        pixels[1];

      p=GetCacheViewVirtualPixels(image_view,NearestNeighbor(x),
        NearestNeighbor(y),1,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      break;
    }
    case SplineInterpolatePixel:
    {
      /*
        Separable cubic B-spline over the 4x4 neighborhood; per-pixel weight
        is the product of the horizontal and vertical spline weights.
      */
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16],
        dx,
        dy;

      PointInfo
        delta;

      ssize_t
        j,
        n;

      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      n=0;
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      for (i=(-1); i < 3L; i++)
      {
        dy=CubicWeightingFunction((MagickRealType) i-delta.y);
        for (j=(-1); j < 3L; j++)
        {
          GetMagickPixelPacket(image,pixels+n);
          SetMagickPixelPacket(image,p,indexes+n,pixels+n);
          alpha[n]=1.0;
          if (image->matte != MagickFalse)
            {
              alpha[n]=QuantumScale*((MagickRealType)
                GetPixelAlpha(p));
              pixels[n].red*=alpha[n];
              pixels[n].green*=alpha[n];
              pixels[n].blue*=alpha[n];
              if (image->colorspace == CMYKColorspace)
                pixels[n].index*=alpha[n];
            }
          dx=CubicWeightingFunction(delta.x-(MagickRealType) j);
          gamma=alpha[n];
          gamma=PerceptibleReciprocal(gamma);
          pixel.red+=gamma*dx*dy*pixels[n].red;
          pixel.green+=gamma*dx*dy*pixels[n].green;
          pixel.blue+=gamma*dx*dy*pixels[n].blue;
          if (image->matte != MagickFalse)
            pixel.opacity+=dx*dy*pixels[n].opacity;
          if (image->colorspace == CMYKColorspace)
            pixel.index+=gamma*dx*dy*pixels[n].index;
          n++;
          p++;
        }
      }
      break;
    }
  }
  return(pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageAttributes() replaces any embedded formatting characters with
% the appropriate image attribute and returns the translated text.
%
% Deprecated, replace with:
%
% InterpretImageProperties(image_info,image,embed_text);
%
% The format of the InterpretImageAttributes method is:
%
% char *InterpretImageAttributes(const ImageInfo *image_info,Image *image,
% const char *embed_text)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
*/
MagickExport char *InterpretImageAttributes(const ImageInfo *image_info,
  Image *image,const char *embed_text)
{
  /*
    Deprecated alias for InterpretImageProperties(): logs the last-use
    version, then delegates.  Caller owns the returned string.
  */
  char
    *property;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  property=InterpretImageProperties(image_info,image,embed_text);
  return(property);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e s R G B C o m p a n d o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InversesRGBCompandor() removes the gamma function from a sRGB pixel.
%
% The format of the InversesRGBCompandor method is:
%
% MagickRealType InversesRGBCompandor(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
MagickExport MagickRealType InversesRGBCompandor(const MagickRealType pixel)
{
  /*
    Remove the sRGB transfer function: values above the linear-segment
    threshold (~0.04045 of full scale) use the 2.4-exponent power curve,
    the remainder the linear 1/12.92 slope.
  */
  if (pixel > (0.0404482362771076*QuantumRange))
    return(QuantumRange*pow((QuantumScale*pixel+0.055)/1.055,2.4));
  return(pixel/12.92);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M a g i c k I n s t a n t i a t e d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMagickInstantiated() returns MagickTrue if the ImageMagick environment
% is currently instantiated: MagickCoreGenesis() has been called but
% MagickDestroy() has not.
%
% The format of the IsMagickInstantiated method is:
%
% MagickBooleanType IsMagickInstantiated(void)
%
*/
MagickExport MagickBooleanType IsMagickInstantiated(void)
{
  /* Deprecated alias: report whether MagickCoreGenesis() is active. */
  MagickBooleanType
    instantiated;

  instantiated=IsMagickCoreInstantiated();
  return(instantiated);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I s S u b i m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsSubimage() returns MagickTrue if the geometry is a valid subimage
% specification (e.g. [1], [1-9], [1,7,4]).
%
% The format of the IsSubimage method is:
%
% unsigned int IsSubimage(const char *geometry,const unsigned int pedantic)
%
% A description of each parameter follows:
%
% o geometry: This string is the geometry specification.
%
% o pedantic: A value other than 0 invokes a more restrictive set of
% conditions for a valid specification (e.g. [1], [1-4], [4-1]).
%
*/
MagickExport unsigned int IsSubimage(const char *geometry,
  const unsigned int pedantic)
{
  /*
    A valid subimage specification contains no 'x'/'X' (which would denote
    a size geometry) and, under pedantic rules, no comma-separated list.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((strchr(geometry,'x') != (char *) NULL) ||
      (strchr(geometry,'X') != (char *) NULL) ||
      ((pedantic != MagickFalse) && (strchr(geometry,',') != (char *) NULL)))
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColor() will map the given color to "black" and "white"
% values, linearly spreading out the colors, and level values on a channel by
% channel basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% Deprecated, replace with:
%
% LevelColorsImageChannel(image,channel,black_color,white_color,invert);
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
% const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
% o white_point: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  /* Deprecated wrapper: delegate directly to LevelColorsImageChannel(). */
  MagickBooleanType
    status;

  status=LevelColorsImageChannel(image,channel,black_color,white_color,
    invert);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i b e r a t e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiberateMemory() frees memory that has already been allocated, and NULL's
% the pointer to it.
%
% The format of the LiberateMemory method is:
%
% void LiberateMemory(void **memory)
%
% A description of each parameter follows:
%
% o memory: A pointer to a block of memory to free for reuse.
%
*/
MagickExport void LiberateMemory(void **memory)
{
  /*
    Free the block referenced by *memory (if any) and clear the caller's
    pointer, guarding against double-free and use-after-free.
  */
  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*memory != (void *) NULL)
    {
      free(*memory);
      *memory=(void *) NULL;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i b e r a t e S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiberateSemaphoreInfo() relinquishes a semaphore.
%
% Deprecated, replace with:
%
% UnlockSemaphoreInfo(*semaphore_info);
%
% The format of the LiberateSemaphoreInfo method is:
%
% LiberateSemaphoreInfo(void **semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void LiberateSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
  /*
    Deprecated alias: log the last-use version, then release the lock held
    on *semaphore_info via UnlockSemaphoreInfo().  The pointer is
    dereferenced unchecked; callers must pass a valid semaphore handle.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  UnlockSemaphoreInfo(*semaphore_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k I n c a r n a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickIncarnate() initializes the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreGenesis(path,MagickFalse);
%
% The format of the MagickIncarnate function is:
%
% MagickIncarnate(const char *path)
%
% A description of each parameter follows:
%
% o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void MagickIncarnate(const char *path)
{
  /*
    Deprecated alias for MagickCoreGenesis(path,MagickFalse): initialize
    the MagickCore environment from the client's execution path, without
    installing signal handlers (second argument is MagickFalse).
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  MagickCoreGenesis(path,MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o n i t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMonitor() calls the monitor handler method with a text string that
% describes the task and a measure of completion. The method returns
% MagickTrue on success otherwise MagickFalse if an error is encountered, e.g.
% if there was a user interrupt.
%
% The format of the MagickMonitor method is:
%
% MagickBooleanType MagickMonitor(const char *text,
% const MagickOffsetType offset,const MagickSizeType span,
% void *client_data)
%
% A description of each parameter follows:
%
% o offset: the position relative to the span parameter which represents
% how much progress has been made toward completing a task.
%
% o span: the span relative to completing a task.
%
% o client_data: the client data.
%
*/
MagickExport MagickBooleanType MagickMonitor(const char *text,
  const MagickOffsetType offset,const MagickSizeType span,
  void *magick_unused(client_data))
{
  /*
    Invoke the registered monitor handler (if any) with the task
    description and progress measure; returns the handler's verdict, or
    MagickTrue when no handler is installed.
  */
  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  magick_unreferenced(client_data);
  assert(text != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",text);
  ProcessPendingEvents(text);
  exception=AcquireExceptionInfo();
  status=(monitor_handler == (MonitorHandler) NULL) ? MagickTrue :
    (*monitor_handler)(text,offset,span,exception);
  exception=DestroyExceptionInfo(exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MapImage() replaces the colors of an image with the closest color from a
% reference image.
%
% Deprecated, replace with:
%
% QuantizeInfo quantize_info;
% GetQuantizeInfo(&quantize_info);
% quantize_info.dither=dither;
% RemapImage(&quantize_info,image,map_image);
%
% The format of the MapImage method is:
%
% MagickBooleanType MapImage(Image *image,const Image *map_image,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o map_image: the image. Reduce image to a set of colors represented by
% this image.
%
% o dither: Set this integer value to something other than zero to
% dither the mapped image.
%
*/
MagickExport MagickBooleanType MapImage(Image *image,const Image *map_image,
  const MagickBooleanType dither)
{
  /*
    Deprecated: remap the image's colors to the closest colors in
    map_image using default quantization settings plus the requested
    dither flag.
  */
  QuantizeInfo
    remap_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(map_image != (Image *) NULL);
  assert(map_image->signature == MagickSignature);
  GetQuantizeInfo(&remap_info);
  remap_info.dither=dither;
  return(RemapImage(&remap_info,image,map_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MapImages() replaces the colors of a sequence of images with the closest
% color from a reference image.
%
% Deprecated, replace with:
%
% QuantizeInfo quantize_info;
% GetQuantizeInfo(&quantize_info);
% quantize_info.dither=dither;
% RemapImages(&quantize_info,images,map_image);
%
% The format of the MapImage method is:
%
% MagickBooleanType MapImages(Image *images,Image *map_image,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a set of Image structures.
%
% o map_image: the image. Reduce image to a set of colors represented by
% this image.
%
% o dither: Set this integer value to something other than zero to
% dither the quantized image.
%
*/
MagickExport MagickBooleanType MapImages(Image *images,const Image *map_image,
  const MagickBooleanType dither)
{
  /*
    Deprecated: remap the colors of every image in the sequence to the
    closest colors in map_image, with the requested dither flag.
  */
  QuantizeInfo
    remap_info;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  GetQuantizeInfo(&remap_info);
  remap_info.dither=dither;
  return(RemapImages(&remap_info,images,map_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatteFloodfill() changes the transparency value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod
% is specified, the transparency value is changed for any neighbor pixel
% that does not match the bordercolor member of image.
%
% By default target must match a particular pixel transparency exactly.
% However, in many cases two transparency values may differ by a
% small amount. The fuzz member of image defines how much tolerance is
% acceptable to consider two transparency values as the same. For example,
% set fuzz to 10 and the opacity values of 100 and 102 respectively are
% now interpreted as the same value for the purposes of the floodfill.
%
% The format of the MatteFloodfillImage method is:
%
% MagickBooleanType MatteFloodfillImage(Image *image,
% const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
% const ssize_t y_offset,const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
% o x,y: the starting location of the operation.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType MatteFloodfillImage(Image *image,
  const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
  const ssize_t y_offset,const PaintMethod method)
{
  /*
    Scanline flood-fill of the opacity channel: starting at
    (x_offset,y_offset), mark connected pixels matching (FloodfillMethod)
    or not matching (FillToBorderMethod) the target color on a scratch
    "floodplane" image, then copy the requested opacity onto the marked
    pixels of the original image.  Segments still to visit are kept on an
    explicit stack manipulated by the PushSegmentStack macro (defined
    elsewhere in this file; presumably it also guards against overflowing
    MaxStacksize — confirm at the macro definition).
    Returns MagickTrue only if every row was processed without a pixel
    cache failure.
  */
  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /* Scratch image: visited pixels are marked by making them transparent. */
  floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    &image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  /*
    Set floodfill color.
  */
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  while (s > segment_stack)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;  /* direction of propagation: +1 or -1 */
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels.
    */
    /* Scan left from x1 marking matching pixels on the floodplane. */
    p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
    q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
      &image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /* Already-visited pixels are transparent on the floodplane. */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      if (method == FloodfillMethod)
        {
          if (IsColorSimilar(image,p,&target) == MagickFalse)
            break;
        }
      else
        if (IsColorSimilar(image,p,&target) != MagickFalse)
          break;
      q->opacity=(Quantum) TransparentOpacity;
      q--;
      p--;
    }
    if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);  /* leak to the left */
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /* Scan right from x, marking matching pixels. */
          if (x < (ssize_t) image->columns)
            {
              p=GetVirtualPixels(image,x,y,image->columns-x,1,
                &image->exception);
              q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
                &image->exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                if (method == FloodfillMethod)
                  {
                    if (IsColorSimilar(image,p,&target) == MagickFalse)
                      break;
                  }
                else
                  if (IsColorSimilar(image,p,&target) != MagickFalse)
                    break;
                q->opacity=(Quantum) TransparentOpacity;
                q++;
                p++;
              }
              if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
                break;
            }
          /* Propagate the filled run to the next row, and back if we
             overshot the parent segment's right edge. */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /* Skip over non-matching pixels up to the segment's right edge. */
          p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            if (method == FloodfillMethod)
              {
                if (IsColorSimilar(image,p,&target) != MagickFalse)
                  break;
              }
            else
              if (IsColorSimilar(image,p,&target) == MagickFalse)
                break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Tile fill color onto floodplane.
    */
    p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
      &image->exception);
    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        q->opacity=opacity;
      p++;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaximumImages() returns the maximum intensity of an image sequence.
%
% Deprecated, replace with:
%
%      EvaluateImages(images,MaxEvaluateOperator,exception);
%
% The format of the MaxImages method is:
%
% Image *MaximumImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MaximumImages(const Image *images,ExceptionInfo *exception)
{
  /*
    Deprecated: return the per-pixel maximum intensity of the image
    sequence.  Bug fix: this previously passed MinEvaluateOperator, which
    computed the per-pixel minimum instead of the maximum.
  */
  return(EvaluateImages(images,MaxEvaluateOperator,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinimumImages() returns the minimum intensity of an image sequence.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MinEvaluateOperator,exception);
%
% The format of the MinimumImages method is:
%
% Image *MinimumImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinimumImages(const Image *images,ExceptionInfo *exception)
{
  /* Deprecated: per-pixel minimum of the sequence via EvaluateImages(). */
  Image
    *minimum_image;

  minimum_image=EvaluateImages(images,MinEvaluateOperator,exception);
  return(minimum_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The algorithm was contributed by Mike Edmonds and implements an insertion
% sort for selecting median color-channel values. For more on this algorithm
% see "Skip Lists: A probabilistic Alternative to Balanced Trees" by William
% Pugh in the June 1990 of Communications of the ACM.
%
% The format of the MedianFilterImage method is:
%
% Image *MedianFilterImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MedianFilterImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated: replace each pixel with the median of its neighborhood,
    delegating to StatisticImage() with MedianStatistic.
  */
  return(StatisticImage(image,MedianStatistic,(size_t) radius,(size_t)
    radius,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModeImage() makes each pixel the 'predominant color' of the neighborhood
% of the specified radius.
%
% The format of the ModeImage method is:
%
% Image *ModeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ModeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated: set each pixel to the predominant color of its
    neighborhood, delegating to StatisticImage() with ModeStatistic.
  */
  Image
    *statistic_image;

  statistic_image=StatisticImage(image,ModeStatistic,(size_t) radius,
    (size_t) radius,exception);
  return(statistic_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MosaicImages() Obsolete Function: Use MergeImageLayers() instead.
%
% Deprecated, replace with:
%
% MergeImageLayers(image,MosaicLayer,exception);
%
% The format of the MosaicImage method is:
%
% Image *MosaicImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image list to be composited together
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MosaicImages(Image *image,ExceptionInfo *exception)
{
  /* Obsolete: delegate to MergeImageLayers() with the MosaicLayer method. */
  Image
    *mosaic_image;

  mosaic_image=MergeImageLayers(image,MosaicLayer,exception);
  return(mosaic_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the OpaqueImage method is:
%
% MagickBooleanType OpaqueImage(Image *image,
% const PixelPacket *target,const PixelPacket fill)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
*/
MagickExport MagickBooleanType OpaqueImage(Image *image,
  const PixelPacket target,const PixelPacket fill)
{
#define OpaqueImageTag "Opaque/Image"

  /*
    Replace every pixel similar to target (within image->fuzz, via
    IsColorSimilar) with fill.  DirectClass images are rewritten pixel by
    pixel; PseudoClass images rewrite the colormap, with an extra pass over
    the pixels only when fill carries transparency.  Always returns
    MagickTrue; cache failures merely stop the loops early.
  */
  MagickBooleanType
    proceed;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Make image color opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Make DirectClass image opaque: replace matching pixels in place.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (IsColorSimilar(image,q,&target) != MagickFalse)
            *q=fill;
          q++;
        }
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
        proceed=SetImageProgress(image,OpaqueImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          break;
      }
      break;
    }
    case PseudoClass:
    {
      /*
        Make PseudoClass image opaque: rewrite matching colormap entries.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (IsColorSimilar(image,&image->colormap[i],&target) != MagickFalse)
          image->colormap[i]=fill;
      }
      if (fill.opacity != OpaqueOpacity)
        {
          /* Opacity lives per-pixel, not in the colormap, so a
             transparent fill needs a pixel pass as well. */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register ssize_t
              x;

            register PixelPacket
              *restrict q;

            q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) != MagickFalse)
                q->opacity=fill.opacity;
              q++;
            }
            if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
              break;
          }
        }
      /* Propagate the updated colormap back into the pixel cache. */
      (void) SyncImage(image);
      break;
    }
  }
  /* A transparent fill activates the matte channel. */
  if (fill.opacity != OpaqueOpacity)
    image->matte=MagickTrue;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p e n C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenCacheView() opens a view into the pixel cache, using the
% VirtualPixelMethod that is defined within the given image itself.
%
% Deprecated, replace with:
%
% AcquireVirtualCacheView(image,&image->exception);
%
% The format of the OpenCacheView method is:
%
% CacheView *OpenCacheView(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheView *OpenCacheView(const Image *image)
{
  CacheView
    *view;

  /*
    Deprecated: acquire a virtual-pixel view that reports errors through the
    image's own exception (const is cast away only for that access).
  */
  view=AcquireVirtualCacheView(image,&((Image *) image)->exception);
  return(view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p e n M a g i c k S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenMagickStream() opens the file at the specified path and return the
% associated stream.
%
% The path of the OpenMagickStream method is:
%
% FILE *OpenMagickStream(const char *path,const char *mode)
%
% A description of each parameter follows.
%
% o path: the file path.
%
% o mode: the file mode.
%
*/
#if defined(MAGICKCORE_HAVE__WFOPEN)
static size_t UTF8ToUTF16(const unsigned char *utf8,wchar_t *utf16)
{
  register const unsigned char
    *p;

  /*
    Convert a UTF-8 byte sequence to UTF-16.  If utf16 is NULL, only scan
    the input and return an upper bound (the UTF-8 byte count) suitable for
    sizing the destination; otherwise decode into utf16 and return the
    number of wchar_t units written, including the terminating L'\0'.  A
    return of 0 signals malformed input; only 1- to 3-byte sequences (code
    points up to U+FFFF) are supported.
  */
  if (utf16 != (wchar_t *) NULL)
    {
      register wchar_t
        *q;

      wchar_t
        c;

      /*
        Convert UTF-8 to UTF-16.
      */
      q=utf16;
      for (p=utf8; *p != '\0'; p++)
      {
        if ((*p & 0x80) == 0)
          *q=(*p);
        else
          if ((*p & 0xE0) == 0xC0)
            {
              /*
                Two-byte sequence: 110xxxxx 10xxxxxx.
              */
              c=(*p);
              *q=(c & 0x1F) << 6;
              p++;
              if ((*p & 0xC0) != 0x80)
                return(0);
              *q|=(*p & 0x3F);
            }
          else
            if ((*p & 0xF0) == 0xE0)
              {
                /*
                  Three-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.  Mask the
                  lead byte to its low 4 payload bits; without the mask the
                  1110 prefix leaks into bits 12+ and corrupts the result on
                  platforms where wchar_t is wider than 16 bits.
                */
                c=(*p);
                *q=(c & 0x0F) << 12;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                c=(*p);
                *q|=(c & 0x3F) << 6;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                *q|=(*p & 0x3F);
              }
            else
              return(0);
        q++;
      }
      *q++='\0';
      return(q-utf16);
    }
  /*
    Measure-only pass: validate the UTF-8 and return its byte count (an
    upper bound on the UTF-16 length, so allocation is always sufficient).
  */
  for (p=utf8; *p != '\0'; p++)
  {
    if ((*p & 0x80) == 0)
      ;
    else
      if ((*p & 0xE0) == 0xC0)
        {
          p++;
          if ((*p & 0xC0) != 0x80)
            return(0);
        }
      else
        if ((*p & 0xF0) == 0xE0)
          {
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
          }
        else
          return(0);
  }
  return(p-utf8);
}
static wchar_t *ConvertUTF8ToUTF16(const unsigned char *source)
{
size_t
length;
wchar_t
*utf16;
length=UTF8ToUTF16(source,(wchar_t *) NULL);
if (length == 0)
{
register ssize_t
i;
/*
Not UTF-8, just copy.
*/
length=strlen((const char *) source);
utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
if (utf16 == (wchar_t *) NULL)
return((wchar_t *) NULL);
for (i=0; i <= (ssize_t) length; i++)
utf16[i]=source[i];
return(utf16);
}
utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
if (utf16 == (wchar_t *) NULL)
return((wchar_t *) NULL);
length=UTF8ToUTF16(source,utf16);
return(utf16);
}
#endif
MagickExport FILE *OpenMagickStream(const char *path,const char *mode)
{
  FILE
    *file;

  /*
    Open the file at path with the given fopen()-style mode.  On builds
    with _wfopen() support (Windows) the arguments are first widened from
    UTF-8 to UTF-16 so non-ASCII paths work; otherwise, or when the wide
    open fails, plain fopen() is used.  NULL arguments set errno to EINVAL
    and return NULL.  The caller owns the returned stream.
  */
  if ((path == (const char *) NULL) || (mode == (const char *) NULL))
    {
      errno=EINVAL;
      return((FILE *) NULL);
    }
  file=(FILE *) NULL;
#if defined(MAGICKCORE_HAVE__WFOPEN)
  {
    wchar_t
      *unicode_mode,
      *unicode_path;

    unicode_path=ConvertUTF8ToUTF16((const unsigned char *) path);
    if (unicode_path == (wchar_t *) NULL)
      return((FILE *) NULL);
    unicode_mode=ConvertUTF8ToUTF16((const unsigned char *) mode);
    if (unicode_mode == (wchar_t *) NULL)
      {
        /* Release the path conversion before bailing out. */
        unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
        return((FILE *) NULL);
      }
    file=_wfopen(unicode_path,unicode_mode);
    unicode_mode=(wchar_t *) RelinquishMagickMemory(unicode_mode);
    unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
  }
#endif
  /* Fall back to the narrow interface if the wide open was unavailable
     or failed. */
  if (file == (FILE *) NULL)
    file=fopen(path,mode);
  return(file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintFloodfill() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% Deprecated, replace with:
%
% FloodfillPaintImage(image,channel,draw_info,target,x,y,
% method == FloodfillMethod ? MagickFalse : MagickTrue);
%
% The format of the PaintFloodfillImage method is:
%
% MagickBooleanType PaintFloodfillImage(Image *image,
% const ChannelType channel,const MagickPixelPacket target,
% const ssize_t x,const ssize_t y,const DrawInfo *draw_info,
% const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o x,y: the starting location of the operation.
%
% o draw_info: the draw info.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType PaintFloodfillImage(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,const ssize_t x,
  const ssize_t y,const DrawInfo *draw_info,const PaintMethod method)
{
  /*
    Deprecated wrapper around FloodfillPaintImage(); FillToBorderMethod
    maps to an inverted (paint-to-border) floodfill.
  */
  return(FloodfillPaintImage(image,channel,draw_info,target,x,y,
    method == FloodfillMethod ? MagickFalse : MagickTrue));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% P a i n t O p a q u e I m a g e %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% Deprecated, replace with:
%
% OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse);
% OpaquePaintImageChannel(image,channel,target,fill,MagickFalse);
%
% The format of the PaintOpaqueImage method is:
%
% MagickBooleanType PaintOpaqueImage(Image *image,
% const PixelPacket *target,const PixelPacket *fill)
% MagickBooleanType PaintOpaqueImageChannel(Image *image,
% const ChannelType channel,const PixelPacket *target,
% const PixelPacket *fill)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
*/
MagickExport MagickBooleanType PaintOpaqueImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill)
{
  /*
    Deprecated: paint over all default channels without inverting the
    color match.
  */
  return(OpaquePaintImageChannel(image,DefaultChannels,target,fill,
    MagickFalse));
}
MagickExport MagickBooleanType PaintOpaqueImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: delegate to OpaquePaintImageChannel() with the
    match sense not inverted.
  */
  status=OpaquePaintImageChannel(image,channel,target,fill,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintTransparentImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% Deprecated, replace with:
%
% TransparentPaintImage(image,target,opacity,MagickFalse);
%
% The format of the PaintTransparentImage method is:
%
% MagickBooleanType PaintTransparentImage(Image *image,
% const MagickPixelPacket *target,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType PaintTransparentImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: set the opacity of pixels matching target, without
    inverting the match.
  */
  status=TransparentPaintImage(image,target,opacity,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P a r s e I m a g e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ParseImageGeometry() is similar to GetGeometry() except the returned
% geometry is modified as determined by the meta characters: %, !, <,
% and >.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,x,y,width,height);
%
% The format of the ParseImageGeometry method is:
%
% int ParseImageGeometry(char *geometry,ssize_t *x,ssize_t *y,
% size_t *width,size_t *height)
%
% A description of each parameter follows:
%
% o flags: Method ParseImageGeometry returns a bitmask that indicates
% which of the four values were located in the geometry string.
%
% o image_geometry: Specifies a character string representing the geometry
% specification.
%
% o x,y: A pointer to an integer. The x and y offset as determined by
% the geometry specification is returned here.
%
% o width,height: A pointer to an unsigned integer. The width and height
% as determined by the geometry specification is returned here.
%
*/
MagickExport int ParseImageGeometry(const char *geometry,ssize_t *x,ssize_t *y,
  size_t *width,size_t *height)
{
  MagickStatusType
    flags;

  /*
    Deprecated since v5.5.1: forward to ParseMetaGeometry() and narrow the
    flag mask to the historical int return type.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  flags=ParseMetaGeometry(geometry,x,y,width,height);
  return((int) flags);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a r s e S i z e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ParseSizeGeometry() returns a region as defined by the geometry string with
% respect to the image dimensions and aspect ratio.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,®ion_info->x,®ion_info->y,
% ®ion_info->width,®ion_info->height);
%
% The format of the ParseSizeGeometry method is:
%
% MagickStatusType ParseSizeGeometry(const Image *image,
% const char *geometry,RectangeInfo *region_info)
%
% A description of each parameter follows:
%
% o geometry: The geometry (e.g. 100x100+10+10).
%
% o region_info: the region as defined by the geometry string.
%
*/
MagickExport MagickStatusType ParseSizeGeometry(const Image *image,
  const char *geometry,RectangleInfo *region_info)
{
  /*
    Deprecated since v6.4.7: seed the region from the image, then apply
    the meta-geometry string and return its flag mask.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.4.7");
  SetGeometry(image,region_info);
  return(ParseMetaGeometry(geometry,&region_info->x,&region_info->y,
    &region_info->width,&region_info->height));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o p I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PopImageList() removes the last image in the list.
%
% Deprecated, replace with:
%
% RemoveLastImageFromList(images);
%
% The format of the PopImageList method is:
%
% Image *PopImageList(Image **images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *PopImageList(Image **images)
{
  Image
    *last;

  /*
    Deprecated since v5.5.2: detach and return the last image in the list.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  last=RemoveLastImageFromList(images);
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o p I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PopImagePixels() transfers one or more pixel components from the image pixel
% cache to a user supplied buffer. The pixels are returned in network byte
% order. MagickTrue is returned if the pixels are successfully transferred,
% otherwise MagickFalse.
%
% The format of the PopImagePixels method is:
%
% size_t PopImagePixels(Image *,const QuantumType quantum,
% unsigned char *destination)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o quantum: Declare which pixel components to transfer (RGB, RGBA, etc).
%
% o destination: The components are transferred to this buffer.
%
*/
MagickExport size_t PopImagePixels(Image *image,const QuantumType quantum,
  unsigned char *destination)
{
  QuantumInfo
    *info;

  size_t
    extent;

  /*
    Deprecated wrapper around ExportQuantumPixels(): serialize the selected
    pixel components into the caller's buffer in network byte order.
    Returns 0 if quantum-info allocation fails.
  */
  info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
  if (info == (QuantumInfo *) NULL)
    return(0);
  extent=ExportQuantumPixels(image,(const CacheView *) NULL,info,quantum,
    destination,&image->exception);
  info=DestroyQuantumInfo(info);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t s c r i p t G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PostscriptGeometry() replaces any page mnemonic with the equivalent size in
% picas.
%
% Deprecated, replace with:
%
% GetPageGeometry(page);
%
% The format of the PostscriptGeometry method is:
%
% char *PostscriptGeometry(const char *page)
%
% A description of each parameter follows.
%
% o page: Specifies a pointer to an array of characters.
% The string is either a Postscript page name (e.g. A4) or a postscript
% page geometry (e.g. 612x792+36+36).
%
*/
MagickExport char *PostscriptGeometry(const char *page)
{
  char
    *geometry;

  /*
    Deprecated since v5.5.1: translate a page mnemonic (e.g. "A4") into its
    geometry string; the caller owns the returned string.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  geometry=GetPageGeometry(page);
  return(geometry);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P u s h I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PushImageList() adds an image to the end of the list.
%
% Deprecated, replace with:
%
% AppendImageToList(images,CloneImageList(image,exception));
%
% The format of the PushImageList method is:
%
% unsigned int PushImageList(Image *images,const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int PushImageList(Image **images,const Image *image,
  ExceptionInfo *exception)
{
  Image
    *clone_list;

  /*
    Deprecated since v5.5.2: clone the source list and append it.  Always
    reports success, matching the historical behavior.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone_list=CloneImageList(image,exception);
  AppendImageToList(images,clone_list);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P u s h I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PushImagePixels() transfers one or more pixel components from a user
% supplied buffer into the image pixel cache of an image. The pixels are
% expected in network byte order. It returns MagickTrue if the pixels are
% successfully transferred, otherwise MagickFalse.
%
% The format of the PushImagePixels method is:
%
% size_t PushImagePixels(Image *image,const QuantumType quantum,
% const unsigned char *source)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o quantum: Declare which pixel components to transfer (red, green, blue,
% opacity, RGB, or RGBA).
%
% o source: The pixel components are transferred from this buffer.
%
*/
MagickExport size_t PushImagePixels(Image *image,const QuantumType quantum,
  const unsigned char *source)
{
  QuantumInfo
    *info;

  size_t
    extent;

  /*
    Deprecated wrapper around ImportQuantumPixels(): read pixel components
    in network byte order from the caller's buffer into the pixel cache.
    Returns 0 if quantum-info allocation fails.
  */
  info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
  if (info == (QuantumInfo *) NULL)
    return(0);
  extent=ImportQuantumPixels(image,(CacheView *) NULL,info,quantum,source,
    &image->exception);
  info=DestroyQuantumInfo(info);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z a t i o n E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizationError() measures the difference between the original and
% quantized images. This difference is the total quantization error. The
% error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% Deprecated, replace with:
%
% GetImageQuantizeError(image);
%
% The format of the QuantizationError method is:
%
% unsigned int QuantizationError(Image *image)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
*/
MagickExport unsigned int QuantizationError(Image *image)
{
  unsigned int
    status;

  /*
    Deprecated since v5.5.3: forward to GetImageQuantizeError(), which
    stores the error metrics on the image itself.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
  status=GetImageQuantizeError(image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RadialBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RadialBlurImage method is:
%
% Image *RadialBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RadialBlurImageChannel(const Image *image,const ChannelType channel,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the radial blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RadialBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  Image
    *blur_image;

  /*
    Deprecated: "radial" blur was renamed rotational blur.
  */
  blur_image=RotationalBlurImage(image,angle,exception);
  return(blur_image);
}
MagickExport Image *RadialBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /*
    Deprecated: channel variant of the renamed rotational blur.
  */
  blur_image=RotationalBlurImageChannel(image,channel,angle,exception);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% R a n d o m C h a n n e l T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomChannelThresholdImage() changes the value of individual pixels based
% on the intensity of each pixel compared to a random threshold. The result
% is a low-contrast, two color image.
%
% The format of the RandomChannelThresholdImage method is:
%
% unsigned int RandomChannelThresholdImage(Image *image,
% const char *channel, const char *thresholds,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o thresholds: a geometry string containing LOWxHIGH thresholds.
% If the string contains 2x2, 3x3, or 4x4, then an ordered
% dither of order 2, 3, or 4 will be performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int RandomChannelThresholdImage(Image *image,const char
  *channel,const char *thresholds,ExceptionInfo *exception)
{
#define RandomChannelThresholdImageText " RandomChannelThreshold image... "

  double
    lower_threshold,
    upper_threshold;

  RandomInfo
    *random_info;

  ssize_t
    count,
    y;

  /*
    Ordered-dither maps: o2/o3/o4 are 2x2, 3x3, and 4x4 threshold tiles
    expressed as fractions of QuantumRange.
  */
  static MagickRealType
    o2[4]={0.2f, 0.6f, 0.8f, 0.4f},
    o3[9]={0.1f, 0.6f, 0.3f, 0.7f, 0.5f, 0.8f, 0.4f, 0.9f, 0.2f},
    o4[16]={0.1f, 0.7f, 1.1f, 0.3f, 1.0f, 0.5f, 1.5f, 0.8f, 1.4f, 1.6f, 0.6f,
      1.2f, 0.4f, 0.9f, 1.3f, 0.2f},
    threshold=128;

  size_t
    order;

  /*
    Threshold image.  Deprecated since v5.5.7.  "thresholds" is either a
    LOWxHIGH band (optionally percentages) or one of "2x2"/"3x3"/"4x4",
    which selects an ordered dither of that order instead of a random
    threshold.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  lower_threshold=0;
  upper_threshold=0;
  if (LocaleCompare(thresholds,"2x2") == 0)
    order=2;
  else
    if (LocaleCompare(thresholds,"3x3") == 0)
      order=3;
    else
      if (LocaleCompare(thresholds,"4x4") == 0)
        order=4;
      else
        {
          /* order 1 == random threshold between the parsed bounds. */
          order=1;
          count=(ssize_t) sscanf(thresholds,"%lf[/x%%]%lf",&lower_threshold,
            &upper_threshold);
          if (strchr(thresholds,'%') != (char *) NULL)
            {
              /* Percentages scale to the quantum range. */
              upper_threshold*=(.01*QuantumRange);
              lower_threshold*=(.01*QuantumRange);
            }
          /* A single bound implies a symmetric band about mid-range. */
          if (count == 1)
            upper_threshold=(MagickRealType) QuantumRange-lower_threshold;
        }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " RandomChannelThresholdImage: channel type=%s",channel);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " Thresholds: %s (%fx%f)",thresholds,lower_threshold,upper_threshold);
  /*
    Intensity thresholding produces a two-color image, so allocate a
    two-entry colormap up front.
  */
  if (LocaleCompare(channel,"all") == 0 ||
      LocaleCompare(channel,"intensity") == 0)
    if (AcquireImageColormap(image,2) == MagickFalse)
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
  random_info=AcquireRandomInfo();
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register IndexPacket
      index,
      *restrict indexes;

    register PixelPacket
      *restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    if (LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"intensity") == 0)
      {
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            intensity;

          intensity=GetPixelIntensity(image,q);
          if (order == 1)
            {
              /*
                Random threshold inside the band; intensities outside the
                band are clamped to the nearest bound.
              */
              if (intensity < lower_threshold)
                threshold=lower_threshold;
              else if (intensity > upper_threshold)
                threshold=upper_threshold;
              else
                threshold=(MagickRealType) (QuantumRange*
                  GetPseudoRandomValue(random_info));
            }
          else if (order == 2)
            threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
          else if (order == 3)
            threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
          else if (order == 4)
            threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)];
          index=(IndexPacket) (intensity <= threshold ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
      }
    /*
      NOTE(review): when channel is "all", q has already advanced past the
      row in the intensity pass above, so this opacity pass continues from
      that position rather than the row start -- confirm against the
      upstream implementation.
    */
    if (LocaleCompare(channel,"opacity") == 0 ||
        LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"matte") == 0)
      {
        if (image->matte != MagickFalse)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            if (order == 1)
              {
                if ((MagickRealType) q->opacity < lower_threshold)
                  threshold=lower_threshold;
                else if ((MagickRealType) q->opacity > upper_threshold)
                  threshold=upper_threshold;
                else
                  threshold=(MagickRealType) (QuantumRange*
                    GetPseudoRandomValue(random_info));
              }
            else if (order == 2)
              threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
            else if (order == 3)
              threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
            else if (order == 4)
              /*
                NOTE(review): the /1.7 scale appears only in this opacity
                branch, not in the intensity pass -- confirm intentional.
              */
              threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)]/1.7;
            SetPixelOpacity(q,(MagickRealType) q->opacity <=
              threshold ? 0 : QuantumRange);
            q++;
          }
      }
    else
      {
        /* To Do: red, green, blue, cyan, magenta, yellow, black */
        if (LocaleCompare(channel,"intensity") != 0)
          ThrowBinaryException(OptionError,"UnrecognizedChannelType",
            image->filename);
      }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  random_info=DestroyRandomInfo(random_info);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a c q u i r e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReacquireMemory() changes the size of the memory and returns a pointer to
% the (possibly moved) block. The contents will be unchanged up to the
% lesser of the new and old sizes.
%
% The format of the ReacquireMemory method is:
%
% void ReacquireMemory(void **memory,const size_t size)
%
% A description of each parameter follows:
%
% o memory: A pointer to a memory allocation. On return the pointer
% may change but the contents of the original allocation will not.
%
% o size: the new size of the allocated memory.
%
*/
MagickExport void ReacquireMemory(void **memory,const size_t size)
{
  void
    *allocation;

  /*
    Deprecated since v5.5.7: resize *memory, preserving contents up to the
    lesser of the old and new sizes.  Unlike plain realloc(), a failed
    resize frees the original block and leaves *memory set to NULL (so the
    caller never holds a stale pointer, but loses the original contents).
  */
  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*memory == (void *) NULL)
    {
      /* Nothing to resize: behave like a fresh allocation. */
      *memory=AcquireMagickMemory(size);
      return;
    }
  allocation=realloc(*memory,size);
  if (allocation == (void *) NULL)
    *memory=RelinquishMagickMemory(*memory);  /* free; yields NULL by convention */
  *memory=allocation;  /* NULL on failure, since allocation is NULL then too */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RecolorImage() apply color transformation to an image. The method permits
% saturation changes, hue rotation, luminance to alpha, and various other
% effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the RecolorImage method is:
%
% Image *RecolorImage(const Image *image,const size_t order,
% const double *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o order: the number of columns and rows in the recolor matrix.
%
% o color_matrix: An array of double representing the recolor matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RecolorImage(const Image *image,const size_t order,
  const double *color_matrix,ExceptionInfo *exception)
{
  Image
    *recolor_image;

  KernelInfo
    *kernel;

  /*
    Deprecated wrapper around ColorMatrixImage().  The caller's matrix is
    borrowed inside a temporary kernel; the values pointer is cleared
    before the kernel is destroyed so the caller's array is never freed
    here.
  */
  kernel=AcquireKernelInfo("1");
  if (kernel == (KernelInfo *) NULL)
    return((Image *) NULL);
  kernel->width=order;
  kernel->height=order;
  kernel->values=(double *) color_matrix;
  recolor_image=ColorMatrixImage(image,kernel,exception);
  kernel->values=(double *) NULL;
  kernel=DestroyKernelInfo(kernel);
  return(recolor_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceNoiseImage() smooths the contours of an image while still preserving
% edge information. The algorithm works by replacing each pixel with its
% neighbor closest in value. A neighbor is defined by radius. Use a radius
% of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the ReduceNoiseImage method is:
%
% Image *ReduceNoiseImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReduceNoiseImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated: noise reduction is a statistic filter using the
    NonpeakStatistic operator over a square neighborhood of the given
    radius.
  */
  return(StatisticImage(image,NonpeakStatistic,(size_t) radius,(size_t)
    radius,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishSemaphoreInfo() relinquishes a semaphore.
%
% The format of the RelinquishSemaphoreInfo method is:
%
% RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info)
{
  /*
    Deprecated: despite the name, this does not destroy the semaphore; it
    only releases (unlocks) it via UnlockSemaphoreInfo().
  */
  assert(semaphore_info != (SemaphoreInfo *) NULL);
  UnlockSemaphoreInfo(semaphore_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e A t t r i b u t e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageAttributeIterator() resets the image attributes iterator. Use it
% in conjunction with GetNextImageAttribute() to iterate over all the values
% associated with an image.
%
% Deprecated, replace with:
%
% ResetImagePropertyIterator(image);
%
% The format of the ResetImageAttributeIterator method is:
%
% ResetImageAttributeIterator(const ImageInfo *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageAttributeIterator(const Image *image)
{
  /*
    Deprecated: image attributes are now image properties; reset that
    iterator instead.
  */
  ResetImagePropertyIterator(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the SetCacheViewPixels method is:
%
% PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /*
    Deprecated: queue an authentic (writable) pixel region on the view;
    errors are reported through the view's own exception.
  */
  return(QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t C a c h e T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetCacheThreshold() sets the amount of free memory allocated for the pixel
% cache. Once this threshold is exceeded, all subsequent pixels cache
% operations are to/from disk.
%
% The format of the SetCacheThreshold() method is:
%
% void SetCacheThreshold(const size_t threshold)
%
% A description of each parameter follows:
%
% o threshold: the number of megabytes of memory available to the pixel
% cache.
%
*/
MagickExport void SetCacheThreshold(const size_t size)
{
  /*
    Deprecated (last use v5.5.1): express the megabyte threshold as resource
    limits -- 'size' MB for memory, twice that for memory-mapped files.
  */
  size_t
    bytes;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  bytes=size*1024*1024;
  (void) SetMagickResourceLimit(MemoryResource,bytes);
  (void) SetMagickResourceLimit(MapResource,2*bytes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t E x c e p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetExceptionInfo() sets the exception severity.
%
% The format of the SetExceptionInfo method is:
%
% MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
% ExceptionType severity)
%
% A description of each parameter follows:
%
% o exception: the exception info.
%
% o severity: the exception severity.
%
*/
/*
  Deprecated: clear any pending exception, then record 'severity'.
  Always returns MagickTrue.
*/
MagickExport MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
  ExceptionType severity)
{
  assert(exception != (ExceptionInfo *) NULL);
  ClearMagickException(exception);
  exception->severity=severity;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImage() sets the red, green, and blue components of each pixel to
% the image background color and the opacity component to the specified
% level of transparency. The background color is defined by the
% background_color member of the image.
%
% The format of the SetImage method is:
%
% void SetImage(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: Set each pixel to this level of transparency.
%
*/
MagickExport void SetImage(Image *image,const Quantum opacity)
{
  /*
    Deprecated (last use v6.2.0): fill every pixel with the image background
    color; when 'opacity' is not OpaqueOpacity it overrides the background's
    opacity and forces DirectClass storage with a matte channel.
  */
  PixelPacket
    background_color;

  ssize_t
    y;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.0");
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  background_color=image->background_color;
  if (opacity != OpaqueOpacity)
    background_color.opacity=opacity;
  if (background_color.opacity != OpaqueOpacity)
    {
      /* A translucent fill requires DirectClass pixels and a matte channel. */
      (void) SetImageStorageClass(image,DirectClass);
      image->matte=MagickTrue;
    }
  if ((image->storage_class == PseudoClass) ||
      (image->colorspace == CMYKColorspace))
    {
      /*
        Set colormapped or CMYK image: also zero the index channel (colormap
        index, or black channel for CMYK).
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRGBO(q,&background_color);
          q++;
        }
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,0);
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
      }
      return;
    }
  /*
    Set DirectClass image.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelRGBO(q,&background_color);
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAttribute() searches the list of image attributes and replaces the
% attribute value. If it is not found in the list, the attribute name
% and value is added to the list.
%
% Deprecated, replace with:
%
% SetImageProperty(image,key,value);
%
% The format of the SetImageAttribute method is:
%
% MagickBooleanType SetImageAttribute(Image *image,const char *key,
% const char *value)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: the key.
%
% o value: the value.
%
*/
MagickExport MagickBooleanType SetImageAttribute(Image *image,const char *key,
  const char *value)
{
  /*
    Deprecated (last use v6.3.1): delegate to SetImageProperty().
  */
  MagickBooleanType
    status;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  status=SetImageProperty(image,key,value);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageList() inserts an image into the list at the specified position.
%
% The format of the SetImageList method is:
%
% unsigned int SetImageList(Image *images,const Image *image,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o offset: the position within the list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int SetImageList(Image **images,const Image *image,
  const ssize_t offset,ExceptionInfo *exception)
{
  /*
    Deprecated (last use v5.5.2): clone 'image' and insert the clone into the
    list at position 'offset'.  Returns MagickTrue on success, MagickFalse if
    the clone fails or 'offset' is past the end of the list.
    Fixes over the original: a NULL clone is no longer inserted, and the
    clone is destroyed (not leaked) when the offset walk fails.
  */
  Image
    *clone;

  register ssize_t
    i;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(image,exception);
  if (clone == (Image *) NULL)
    return(MagickFalse);
  while (GetPreviousImageInList(*images) != (Image *) NULL)
    (*images)=GetPreviousImageInList(*images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(*images) == (Image *) NULL)
      {
        /* Offset exceeds list length: dispose of the clone to avoid a leak. */
        clone=DestroyImageList(clone);
        return(MagickFalse);
      }
    (*images)=GetNextImageInList(*images);
  }
  InsertImageInList(images,clone);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImagePixels() queues a mutable pixel region.
% If the region is successfully initialized a pointer to a PixelPacket
% array representing the region is returned, otherwise NULL is returned.
% The returned pointer may point to a temporary working buffer for the
% pixels or it may point to the final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This useful while the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% SetImagePixels() any way it pleases. SetImagePixels() does not initialize
% the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in RAM, or in a
% memory-mapped file. The returned pointer should *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% Deprecated, replace with:
%
% QueueAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
% The format of the SetImagePixels() method is:
%
% PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o pixels: SetImagePixels returns a pointer to the pixels if they are
% transferred, otherwise a NULL is returned.
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
/*
  Deprecated: thin forwarder to QueueAuthenticPixels(), reporting errors into
  the image's own exception structure.
*/
MagickExport PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows)
{
  return(QueueAuthenticPixels(image,x,y,columns,rows,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMagickRegistry() sets a blob into the registry and returns a unique ID.
% If an error occurs, -1 is returned.
%
% The format of the SetMagickRegistry method is:
%
% ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
% const size_t length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o type: the registry type.
%
% o blob: the address of a Binary Large OBject.
%
% o length: For a registry type of ImageRegistryType use sizeof(Image)
% otherwise the blob length in number of bytes.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
  const size_t magick_unused(length),ExceptionInfo *exception)
{
  /*
    Deprecated: store 'blob' in the image registry under a generated numeric
    key; returns the id used, or -1 on failure.  'length' is ignored.
    NOTE(review): the format string embeds a trailing '\n' in the key, and the
    static 'id' counter is shared mutable state (not thread-safe) -- both are
    preserved as-is here; confirm before changing.
  */
  char
    key[MaxTextExtent];

  MagickBooleanType
    status;

  static ssize_t
    id = 0;

  magick_unreferenced(length);

  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  status=SetImageRegistry(type,key,blob,exception);
  if (status == MagickFalse)
    return(-1);
  return(id++);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M o n i t o r H a n d l e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMonitorHandler() sets the monitor handler to the specified method
% and returns the previous monitor handler.
%
% The format of the SetMonitorHandler method is:
%
% MonitorHandler SetMonitorHandler(MonitorHandler handler)
%
% A description of each parameter follows:
%
% o handler: Specifies a pointer to a method to handle monitors.
%
*/
/*
  Deprecated: return the currently installed module-static monitor handler.
*/
MagickExport MonitorHandler GetMonitorHandler(void)
{
  return(monitor_handler);
}
MagickExport MonitorHandler SetMonitorHandler(MonitorHandler handler)
{
  /*
    Deprecated: install 'handler' as the progress-monitor callback and hand
    the previously installed handler back to the caller.
  */
  MonitorHandler
    old_handler;

  old_handler=monitor_handler;
  monitor_handler=handler;
  return(old_handler);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h i f t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShiftImageList() removes an image from the beginning of the list.
%
% Deprecated, replace with:
%
% RemoveFirstImageFromList(images);
%
% The format of the ShiftImageList method is:
%
% Image *ShiftImageList(Image **images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *ShiftImageList(Image **images)
{
  /*
    Deprecated (last use v5.5.2): pop the first image off the list; use
    RemoveFirstImageFromList() directly in new code.
  */
  Image
    *shifted;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  shifted=RemoveFirstImageFromList(images);
  return(shifted);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S i z e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SizeBlob() returns the current length of the image file or blob.
%
% Deprecated, replace with:
%
% GetBlobSize(image);
%
% The format of the SizeBlob method is:
%
% off_t SizeBlob(Image *image)
%
% A description of each parameter follows:
%
% o size: Method SizeBlob returns the current length of the image file
% or blob.
%
% o image: the image.
%
*/
/*
  Deprecated (last use v5.4.3): report the current length of the image file
  or blob via GetBlobSize().  The deprecation event is only logged when the
  image has debugging enabled.
*/
MagickExport MagickOffsetType SizeBlob(Image *image)
{
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
  return((MagickOffsetType) GetBlobSize(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImageList() removes the images designated by offset and length from
% the list and replaces them with the specified list.
%
% The format of the SpliceImageList method is:
%
% Image *SpliceImageList(Image *images,const ssize_t offset,
% const size_t length,const Image *splices,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
% o length: the length of the image list to remove.
%
% o splice: Replace the removed image list with this list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImageList(Image *images,const ssize_t offset,
  const size_t length,const Image *splices,ExceptionInfo *exception)
{
  /*
    Deprecated (last use v5.5.2): remove 'length' images at 'offset' and
    replace them with a clone of 'splices'.  Returns the (possibly new) head
    of the list, or NULL on failure.
    Fixes over the original: a NULL clone is rejected up front, and the clone
    is destroyed (not leaked) when the offset walk runs past the list end.
  */
  Image
    *clone;

  register ssize_t
    i;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(splices,exception);
  if (clone == (Image *) NULL)
    return((Image *) NULL);
  while (GetPreviousImageInList(images) != (Image *) NULL)
    images=GetPreviousImageInList(images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(images) == (Image *) NULL)
      {
        /* Offset exceeds list length: dispose of the clone to avoid a leak. */
        clone=DestroyImageList(clone);
        return((Image *) NULL);
      }
    images=GetNextImageInList(images);
  }
  (void) SpliceImageIntoList(&images,length,clone);
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% s R G B C o m p a n d o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBCompandor() adds the gamma function to a sRGB pixel.
%
% The format of the sRGBCompandor method is:
%
% MagickRealType sRGBCompandor(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
MagickExport MagickRealType sRGBCompandor(const MagickRealType pixel)
{
  /*
    Apply the sRGB gamma (companding) curve: a linear segment at or below the
    breakpoint, a power-law segment above it.
  */
  if (pixel > (0.0031306684425005883*QuantumRange))
    return(QuantumRange*(1.055*pow(QuantumScale*pixel,1.0/2.4)-0.055));
  return(12.92*pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Strip() strips any whitespace or quotes from the beginning and end of a
% string of characters.
%
% The format of the Strip method is:
%
% void Strip(char *message)
%
% A description of each parameter follows:
%
% o message: Specifies an array of characters.
%
*/
MagickExport void Strip(char *message)
{
  /*
    Deprecated (last use v5.5.7): trim leading/trailing whitespace plus at
    most one surrounding quote character from 'message', in place.
  */
  register char
    *p,
    *q;

  assert(message != (char *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*message == '\0')
    return;
  if (strlen(message) == 1)
    return;
  /* p: first character to keep (skip whitespace, then one opening quote). */
  p=message;
  while (isspace((int) ((unsigned char) *p)) != 0)
    p++;
  if ((*p == '\'') || (*p == '"'))
    p++;
  /* q: last character to keep (trim whitespace, then one closing quote). */
  q=message+strlen(message)-1;
  while ((isspace((int) ((unsigned char) *q)) != 0) && (q > p))
    q--;
  if (q > p)
    if ((*q == '\'') || (*q == '"'))
      q--;
  /* Shift the kept span to the front.  NOTE(review): source and destination
     overlap, so CopyMagickMemory must have memmove semantics -- confirm. */
  (void) CopyMagickMemory(message,p,(size_t) (q-p+1));
  message[q-p+1]='\0';
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncCacheView() saves the cache view pixels to the in-memory or disk
% cache. It returns MagickTrue if the pixel region is synced, otherwise
% MagickFalse.
%
% Deprecated, replace with:
%
% SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
% The format of the SyncCacheView method is:
%
% MagickBooleanType SyncCacheView(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport MagickBooleanType SyncCacheView(CacheView *cache_view)
{
  /*
    Deprecated: forward directly to SyncCacheViewAuthenticPixels(), reporting
    errors through the view's own exception structure.
  */
  return(SyncCacheViewAuthenticPixels(cache_view,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncCacheViewPixels() saves the cache view pixels to the in-memory
% or disk cache. It returns MagickTrue if the pixel region is flushed,
% otherwise MagickFalse.
%
% Deprecated, replace with:
%
% SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
% The format of the SyncCacheViewPixels method is:
%
% MagickBooleanType SyncCacheViewPixels(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncCacheViewPixels(CacheView *cache_view)
{
  /*
    Deprecated: identical to SyncCacheView() -- forward to
    SyncCacheViewAuthenticPixels() using the view's exception structure.
  */
  return(SyncCacheViewAuthenticPixels(cache_view,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is synced, otherwise
% MagickFalse.
%
% Deprecated, replace with:
%
% SyncAuthenticPixels(image,&image->exception);
%
% The format of the SyncImagePixels() method is:
%
% MagickBooleanType SyncImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Deprecated: thin forwarder to SyncAuthenticPixels() using the image's own
  exception structure.
*/
MagickExport MagickBooleanType SyncImagePixels(Image *image)
{
  return(SyncAuthenticPixels(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e m p o r a r y F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TemporaryFilename() replaces the contents of path by a unique path name.
%
% The format of the TemporaryFilename method is:
%
% void TemporaryFilename(char *path)
%
% A description of each parameter follows.
%
% o path: Specifies a pointer to an array of characters. The unique path
% name is returned in this array.
%
*/
MagickExport void TemporaryFilename(char *path)
{
  /*
    Deprecated (last use v5.5.6): write a unique temporary path name into
    'path'.  NOTE(review): the name is acquired and then immediately
    released, so another process may claim it before the caller uses it
    (TOCTOU race) -- inherent to this deprecated interface.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  (void) AcquireUniqueFilename(path);
  (void) RelinquishUniqueFileResource(path);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThresholdImage() changes the value of individual pixels based on
% the intensity of each pixel compared to threshold. The result is a
% high-contrast, two color image.
%
% The format of the ThresholdImage method is:
%
% unsigned int ThresholdImage(Image *image,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value
%
*/
MagickExport unsigned int ThresholdImage(Image *image,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"

  /*
    Deprecated (last use v5.5.7): convert the image to a two-color colormap
    by comparing each pixel's intensity against 'threshold'.
    NOTE(review): returns MagickTrue even when a row fails to sync and the
    loop breaks early -- preserved as-is.
  */
  IndexPacket
    index;

  ssize_t
    y;

  /*
    Threshold image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (!AcquireImageColormap(image,2))
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      "UnableToThresholdImage");
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Colormap slot 0 below/at threshold, slot 1 above. */
      index=(IndexPacket) (GetPixelIntensity(image,q) <=
        threshold ? 0 : 1);
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (!SyncAuthenticPixels(image,&image->exception))
      break;
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h r e s h o l d I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThresholdImageChannel() changes the value of individual pixels based on
% the intensity of each pixel channel. The result is a high-contrast image.
%
% The format of the ThresholdImageChannel method is:
%
% unsigned int ThresholdImageChannel(Image *image,const char *threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
*/
MagickExport unsigned int ThresholdImageChannel(Image *image,
  const char *threshold)
{
#define ThresholdImageTag "Threshold/Image"

  /*
    Deprecated: threshold each channel against values parsed from the
    geometry string 'threshold' (rho/sigma/xi/psi -> red/green/blue/opacity).
    A NULL threshold is a no-op; missing components default to the red value
    (opacity defaults to opaque).  NOTE(review): returns MagickTrue even if a
    row fails to sync -- preserved as-is.
  */
  MagickPixelPacket
    pixel;

  GeometryInfo
    geometry_info;

  IndexPacket
    index;

  ssize_t
    y;

  unsigned int
    flags;

  /*
    Threshold image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (threshold == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  flags=ParseGeometry(threshold,&geometry_info);
  pixel.red=geometry_info.rho;
  if (flags & SigmaValue)
    pixel.green=geometry_info.sigma;
  else
    pixel.green=pixel.red;
  if (flags & XiValue)
    pixel.blue=geometry_info.xi;
  else
    pixel.blue=pixel.red;
  if (flags & PsiValue)
    pixel.opacity=geometry_info.psi;
  else
    pixel.opacity=(MagickRealType) OpaqueOpacity;
  if (flags & PercentValue)
    {
      /* Percent form: scale each threshold from [0,100] to [0,QuantumRange]. */
      pixel.red*=QuantumRange/100.0f;
      pixel.green*=QuantumRange/100.0f;
      pixel.blue*=QuantumRange/100.0f;
      pixel.opacity*=QuantumRange/100.0f;
    }
  if (!(flags & SigmaValue))
    {
      /* Single-value threshold: switch to a two-entry colormap; a zero value
         requests an automatically computed (dynamic) threshold. */
      if (!AcquireImageColormap(image,2))
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          "UnableToThresholdImage");
      if (pixel.red == 0)
        (void) GetImageDynamicThreshold(image,2.0,2.0,&pixel,&image->exception);
    }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    if (IsMagickGray(&pixel) != MagickFalse)
      /* Gray threshold: bilevel result via the two-entry colormap. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        index=(IndexPacket) (GetPixelIntensity(image,q) <= pixel.red ? 0 : 1);
        SetPixelIndex(indexes+x,index);
        SetPixelRed(q,image->colormap[(ssize_t) index].red);
        SetPixelGreen(q,image->colormap[(ssize_t) index].green);
        SetPixelBlue(q,image->colormap[(ssize_t) index].blue);
        q++;
      }
    else
      /* Per-channel threshold: each channel snaps to 0 or QuantumRange. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q,(MagickRealType) q->red <= pixel.red
          ? 0 : QuantumRange);
        SetPixelGreen(q,(MagickRealType) q->green <= pixel.green
          ? 0 : QuantumRange);
        SetPixelBlue(q,(MagickRealType) q->blue <= pixel.blue
          ? 0 : QuantumRange);
        SetPixelOpacity(q,(MagickRealType) q->opacity <= pixel.opacity
          ? 0 : QuantumRange);
        q++;
      }
    if (!SyncAuthenticPixels(image,&image->exception))
      break;
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformColorspace() converts the image to a specified colorspace.
% If the image is already in the requested colorspace, no work is performed.
% Note that the current colorspace is stored in the image colorspace member.
% The transformation matrices are not necessarily the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% Deprecated, replace with:
%
% TransformImageColorspace(image,colorspace);
%
% The format of the TransformColorspace method is:
%
% unsigned int TransformColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image to transform
%
% o colorspace: the desired colorspace.
%
*/
/*
  Deprecated (last use v5.5.6): forward to TransformImageColorspace().
*/
MagickExport unsigned int TransformColorspace(Image *image,
  const ColorspaceType colorspace)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  return(TransformImageColorspace(image,colorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m H S L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformHSL() converts a (red, green, blue) to a (hue, saturation,
% lightness) triple.
%
% The format of the TransformHSL method is:
%
% void TransformHSL(const Quantum red,const Quantum green,
% const Quantum blue,double *hue,double *saturation,double *lightness)
%
% A description of each parameter follows:
%
% o red, green, blue: A Quantum value representing the red, green, and
% blue component of a pixel..
%
% o hue, saturation, lightness: A pointer to a double value representing a
% component of the HSL color space.
%
*/
/* Return the smaller of two doubles (helper for TransformHSL). */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
MagickExport void TransformHSL(const Quantum red,const Quantum green,
  const Quantum blue,double *hue,double *saturation,double *lightness)
{
  /*
    Convert an RGB triple to HSL.  Outputs are normalized to [0,1]; a gray
    pixel (delta == 0) yields hue = saturation = 0.
  */
  MagickRealType
    b,
    delta,
    g,
    max,
    min,
    r;

  /*
    Convert RGB to HSL colorspace.
  */
  assert(hue != (double *) NULL);
  assert(saturation != (double *) NULL);
  assert(lightness != (double *) NULL);
  /* Scale quantum inputs down to [0,1]. */
  r=QuantumScale*red;
  g=QuantumScale*green;
  b=QuantumScale*blue;
  max=MagickMax(r,MagickMax(g,b));
  min=MagickMin(r,MagickMin(g,b));
  *hue=0.0;
  *saturation=0.0;
  *lightness=(double) ((min+max)/2.0);
  delta=max-min;
  if (delta == 0.0)
    return;
  /* Saturation denominator depends on which half of the lightness range. */
  *saturation=(double) (delta/((*lightness < 0.5) ? (min+max) :
    (2.0-max-min)));
  /* Hue sector (in sixths of the circle) chosen by the dominant channel. */
  if (r == max)
    *hue=(double) (g == min ? 5.0+(max-b)/delta : 1.0-(max-g)/delta);
  else
    if (g == max)
      *hue=(double) (b == min ? 1.0+(max-r)/delta : 3.0-(max-b)/delta);
    else
      *hue=(double) (r == min ? 3.0+(max-g)/delta : 5.0-(max-r)/delta);
  *hue/=6.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s l a t e T e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TranslateText() replaces any embedded formatting characters with the
% appropriate image attribute and returns the translated text.
%
% Deprecated, replace with:
%
% InterpretImageProperties(image_info,image,embed_text);
%
% The format of the TranslateText method is:
%
% char *TranslateText(const ImageInfo *image_info,Image *image,
% const char *embed_text)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
*/
/*
  Deprecated (last use v6.2.6): forward to InterpretImageProperties().
  Returns a newly allocated translated string (caller frees).
*/
MagickExport char *TranslateText(const ImageInfo *image_info,Image *image,
  const char *embed_text)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.6");
  return(InterpretImageProperties(image_info,image,embed_text));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the TransparentImage method is:
%
% MagickBooleanType TransparentImage(Image *image,
% const PixelPacket target,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType TransparentImage(Image *image,
  const PixelPacket target,const Quantum opacity)
{
#define TransparentImageTag "Transparent/Image"

  /*
    Deprecated (last use v6.1.0): set the opacity of every pixel that matches
    'target' (within the image's fuzz) to 'opacity'.
    NOTE(review): returns MagickTrue even when a row fails or progress is
    cancelled -- preserved as-is.
  */
  MagickBooleanType
    proceed;

  ssize_t
    y;

  /*
    Make image color transparent.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsColorSimilar(image,q,&target) != MagickFalse)
        q->opacity=opacity;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,TransparentImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h i f t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnshiftImageList() adds the image to the beginning of the list.
%
% Deprecated, replace with:
%
% PrependImageToList(images,CloneImageList(image,exception));
%
% The format of the UnshiftImageList method is:
%
% unsigned int UnshiftImageList(Image *images,const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int UnshiftImageList(Image **images,const Image *image,
  ExceptionInfo *exception)
{
  /*
    Deprecated (last use v5.5.2): clone 'image' and prepend the clone to the
    list.  NOTE(review): the CloneImageList() result is not checked, so a
    failed clone is silently a no-op and MagickTrue is still returned.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  PrependImageToList(images,CloneImageList(image,exception));
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ V a l i d a t e C o l o r m a p I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ValidateColormapIndex() validates the colormap index. If the index does
% not range from 0 to the number of colors in the colormap an exception
% issued and 0 is returned.
%
% Deprecated, replace with:
%
% ConstrainColormapIndex(image,index);
%
% The format of the ValidateColormapIndex method is:
%
% IndexPacket ValidateColormapIndex(Image *image,const unsigned int index)
%
% A description of each parameter follows:
%
% o index: Method ValidateColormapIndex returns colormap index if it is
% valid other an exception issued and 0 is returned.
%
% o image: the image.
%
% o index: This integer is the colormap index.
%
*/
/*
  Deprecated (last use v5.4.4): forward to ConstrainColormapIndex(), which
  raises an exception and returns 0 for an out-of-range index.
*/
MagickExport IndexPacket ValidateColormapIndex(Image *image,
  const size_t index)
{
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.4");
  return(ConstrainColormapIndex(image,index));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z o o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZoomImage() creates a new image that is a scaled size of an existing one.
% It allocates the memory necessary for the new Image structure and returns a
% pointer to the new image. The Point filter gives fast pixel replication,
%  Triangle is equivalent to bi-linear interpolation, and Mitchell gives slower,
% very high-quality results. See Graphic Gems III for details on this
% algorithm.
%
% The filter member of the Image structure specifies which image filter to
% use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp.
%
% The format of the ZoomImage method is:
%
% Image *ZoomImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: An integer that specifies the number of columns in the zoom
% image.
%
% o rows: An integer that specifies the number of rows in the scaled
% image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ZoomImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /*
    Deprecated wrapper: scale the image to columns x rows with ResizeImage(),
    using the filter and blur factor already stored in the source image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
}
#endif
|
omp_nonmonotonic_dynamic1.c | // RUN: %libomp-compile
// RUN: env OMP_SCHEDULE=nonmonotonic:dynamic,10 %libomp-run
// The test checks iterations distribution for OMP 5.0 nonmonotonic OMP_SCHEDULE
// case #threads > #chunks (fallback to monotonic dynamic)
#include <stdio.h>
#include <omp.h>
#define ITERS 100
#define CHUNK 10
int err = 0;
int main(int argc, char **argv) {
  int idx, chunk_no;
  int owners[ITERS]; // owners[i] = thread that executed iteration i

  omp_set_num_threads(16); // #threads is bigger than #chunks

  // Record which thread executed each iteration under the runtime schedule
  // (OMP_SCHEDULE=nonmonotonic:dynamic,10 supplied by the RUN line).
#pragma omp parallel for schedule(runtime)
  for (idx = 0; idx < ITERS; ++idx) {
    owners[idx] = omp_get_thread_num();
  }

  // Every chunk of CHUNK consecutive iterations must share a single owner,
  // i.e. each chunk was executed by exactly one thread.
  for (chunk_no = 0; chunk_no < ITERS / CHUNK; ++chunk_no) {
    int base = chunk_no * CHUNK;  // first iteration of this chunk
    int owner = owners[base];     // thread that claimed the chunk
    for (idx = 1; idx < CHUNK; ++idx) {
#if _DEBUG
      printf("iter %d: (%d %d)\n", base + idx, owner, owners[base + idx]);
#endif
      if (owner != owners[base + idx]) {
        err++;
      }
    }
  }
  if (err > 0) {
    printf("Failed, err = %d\n", err);
    return 1;
  }
  printf("Passed\n");
  return 0;
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
    /// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
  /// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  PreferredTypeBuilder PrevPreferredType; // Saved code-completion hint.
  Token PrevTok;                          // Token at the snapshot point.
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive; // True until Commit() or Revert() is called.

public:
  // Snapshot the parser state (current token, preferred-type hint,
  // tentatively-declared-identifier count, delimiter depths) and ask the
  // preprocessor to start caching tokens so that Revert() can rewind.
  explicit TentativeParsingAction(Parser& p) : P(p) {
    PrevPreferredType = P.PreferredType;
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }

  // Accept the tokens consumed since construction. Note that the
  // tentatively-declared identifier list is still trimmed back to its
  // snapshot size even on commit.
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }

  // Rewind the preprocessor to the snapshot point and restore every piece
  // of parser state captured in the constructor.
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }

  // Exactly one of Commit()/Revert() must run before destruction.
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  // Takes the state snapshot via the base-class constructor.
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  // Unconditionally rewinds; Commit() is intentionally inaccessible
  // (private inheritance).
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC; // The ObjC container we temporarily exited; null if none.
  // Saves and restores the Parser's ParsingInObjCContainer flag.
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    // Only switch contexts if we are actually inside an ObjC container.
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self; // Null once the scope has been exited (or never entered).
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // Construct a new object to manage a scope in the parser Self, where the
  // new Scope is created with the flags ScopeFlags — but only when we are
  // not about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (!EnteredScope || BeforeCompoundStmt) {
      // No scope is entered; a compound statement still bumps the MS
      // mangling number.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
      return;
    }
    Self->EnterScope(ScopeFlags);
  }

  // Exit the scope associated with this object now, rather than waiting
  // until the object is destroyed. Safe to call more than once.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;   // Scope whose flags are temporarily replaced.
  unsigned OldFlags; // Original flags, put back by the destructor.
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  // Defined out of line. NOTE(review): ManageFlags == false presumably
  // makes this a no-op wrapper — confirm against the definition.
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
// Emit a diagnostic at an explicit source location.
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
// Emit a diagnostic at a specific token's location.
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
// Convenience overload: diagnose at the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
// Combine SkipUntilFlags bit values with '|' without losing the enum type.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
/// Convenience overload: skip until a single token kind.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind Toks[] = {T};
  return SkipUntil(Toks, Flags);
}
/// Convenience overload: skip until either of two token kinds.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind Toks[] = {T1, T2};
  return SkipUntil(Toks, Flags);
}
/// Convenience overload: skip until any of three token kinds.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind Toks[] = {T1, T2, T3};
  return SkipUntil(Toks, Flags);
}
/// Core implementation; see the documentation above.
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // One hook per category of late-parsed tokens; subclasses below override
  // the hooks that apply to them. Base implementations are out of line.
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  // Each override forwards the request to every late-parsed member of the
  // nested class (definitions are out of line).
  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;

private:
  Parser *Self;
  ParsingClass *Class; // The nested class whose members are deferred.
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;           // The attribute's argument tokens.
  IdentifierInfo &AttrName;    // Name of the attribute.
  IdentifierInfo *MacroII = nullptr; // Macro that expanded to it, if any.
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls; // Declarations the attribute applies to.

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  // Record another declaration this attribute should be attached to.
  void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) {}

  /// Whether these attributes should be parsed shortly after creation
  /// rather than deferred to the end of the class. Const-qualified: this
  /// is a pure accessor.
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;           // The method (or template) being deferred.
  CachedTokens Toks; // The body's tokens, replayed later.

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

  void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed. Owned by this object; may be null.
  CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped; // True once the class has been popped off the stack.
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {}

  /// Pop this class off the stack now rather than at destruction.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    if (!Popped)
      Pop();
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Default state: not parsing a template at all.
  /// FIX: LastParameterListWasEmpty was previously left uninitialized here,
  /// so reading it after default construction was undefined behavior.
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc(),
        LastParameterListWasEmpty(false) { }

  /// A template declaration or (when isSpecialization) an explicit
  /// specialization, with its parameter lists.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// An explicit instantiation, optionally preceded by 'extern'.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
// ParsedAttributes plus the source range the attributes cover.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

  // Clear the attribute list and reset the range to an invalid one.
  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  SourceRange Range;
};
// ParsedAttributesView plus the source range the attributes cover.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}

  // Reset the view and the range (list-only clear per the base class).
  void clearListOnly() {
    ParsedAttributesView::clearListOnly();
    Range = SourceRange();
  }

  SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// Tracks state while parsing an Objective-C @implementation, collecting
/// method bodies whose parsing is deferred until @end.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;          // The @implementation declaration.
  bool HasCFunction;  // Whether a C function was seen inside the @implementation.
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    // Register ourselves as the implementation currently being parsed.
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished; // Set by finish(); queried via isFinished().
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
// Convenience overload: use the innermost tracked potential angle bracket,
// if any; with no open candidate there is nothing to check.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  auto *Info = AngleBrackets.getCurrent(*this);
  return Info ? checkPotentialAngleBracketDelimiter(*Info, OpToken) : false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
///       initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
/// Dispatches on the current token: a '{' starts a braced initializer,
/// anything else is an assignment-expression.
ExprResult ParseInitializer() {
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;
  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;
  /// The name we're looking for.
  UnqualifiedId Name;
  /// How the body of this __if_exists or __if_not_exists block
  /// should be handled (see IfExistsBehavior).
  IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  // The switch is intentionally exhaustive (no default) so that adding a
  // new DeclSpecContext triggers a compiler warning here.
  switch (DSC) {
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  // Exhaustive switch (no default) so new contexts must be classified.
  switch (DSC) {
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  /// Location of the ':' separating declaration from range expression.
  SourceLocation ColonLoc;
  /// The range expression following the ':'.
  ExprResult RangeExpr;

  // A for-range declaration was parsed iff a ':' was seen.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
/// Extends ForRangeInit with the loop-variable statement, for use when
/// disambiguating a condition from a for-range declaration.
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's no a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
  return isCXXDeclarationStatement();
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Notify Sema that we are at the start of an OpenMP loop before
  // disambiguating; must happen before any tentative parsing.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  return getLangOpts().CPlusPlus
             ? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
             : isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // C has no type-id/expression ambiguity inside parens.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool IsAmbiguousIgnored;
  return isTypeIdInParens(IsAmbiguousIgnored);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool IsAmbiguous;
  return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool IsAmbiguousIgnored;
  return isCXXTypeId(Context, IsAmbiguousIgnored);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only diagnose when [[ genuinely starts a standard attribute.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// If the current tokens begin a C++11 attribute-specifier ('[[' or
/// 'alignas'), diagnose it as misplaced and suggest CorrectLocation.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  bool AtAttributeStart =
      (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
      Tok.is(tok::kw_alignas);
  if (AtAttributeStart)
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
/// Diagnose and drop any attributes in Attrs; no-op when none were parsed.
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// View variant: diagnoses prohibited attributes but only clears the list,
/// leaving the underlying attribute storage intact.
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If the current token is '__attribute__', parse the GNU attribute list
/// and attach the result to declarator D; otherwise do nothing.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes ParsedAttrs(AttrFactory);
  SourceLocation AttrsEndLoc;
  ParseGNUAttributes(ParsedAttrs, &AttrsEndLoc, LateAttrs, &D);
  D.takeAttributes(ParsedAttrs, AttrsEndLoc);
}
/// If the current token is '__attribute__', parse the GNU attribute list
/// directly into attrs; otherwise do nothing.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If [[]] attributes are enabled and one starts here, parse it and attach
/// the result to declarator D; otherwise do nothing.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange ParsedAttrs(AttrFactory);
  SourceLocation AttrsEndLoc;
  ParseCXX11Attributes(ParsedAttrs, &AttrsEndLoc);
  D.takeAttributes(ParsedAttrs, AttrsEndLoc);
}
/// If [[]] attributes are enabled and one starts here, parse it into a
/// temporary ranged list and move the attributes into attrs.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseCXX11Attributes(RangedAttrs, endLoc);
  attrs.takeAllFrom(RangedAttrs);
}
/// If [[]] attributes are enabled and one starts here, parse it into attrs.
/// OuterMightBeMessageSend is forwarded to the specifier disambiguation.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return;
  if (isCXX11AttributeSpecifier(/*Disambiguate=*/false,
                                OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// Parse Microsoft-style '[...]' attributes when MS extensions are enabled
/// and the current token is '['; otherwise do nothing.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt || Tok.isNot(tok::l_square))
    return;
  ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parse '__declspec(...)' specifiers when the keyword is enabled and is
/// the current token; otherwise do nothing.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (!getLangOpts().DeclSpecKeyword || Tok.isNot(tok::kw___declspec))
    return;
  ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true; // Nothing to do outside OpenCL; not an error.
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload that classifies the current token (Tok).
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;   // Nested-name-specifier whose scope may be entered.
  bool EnteredScope;  // True once Sema was told we entered SS's scope.
  bool CreatedScope;  // True once a parser Scope was pushed (must be popped).
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  // Push a parser scope and ask Sema to enter the declarator scope of SS.
  // May only be called once, and only when SS is set.
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    // NOTE(review): EnteredScope is recorded only when the Sema callback
    // returns false — presumably false signals success; confirm in Sema.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  // Undo, in reverse order, exactly what EnterDeclaratorScope did.
  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// Locations and name of one component of a nested-namespace-definition
/// (e.g. each of A, B, C in `namespace A::inline B::C`).
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc; // location of the introducing '::' / keyword
  SourceLocation InlineLoc;    // location of 'inline', if present
  SourceLocation IdentLoc;     // location of the namespace identifier
  IdentifierInfo *Ident;       // the namespace identifier itself
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// One parsed using-declarator: `typename`-opt nested-name-specifier
/// unqualified-id `...`-opt. Reused across declarators in a comma list.
struct UsingDeclarator {
  SourceLocation TypenameLoc; // location of 'typename', if present
  CXXScopeSpec SS;            // qualifier naming the used entity's scope
  UnqualifiedId Name;         // the unqualified-id being introduced
  SourceLocation EllipsisLoc; // location of '...' for pack expansions
  /// Reset all fields so the object can parse the next declarator.
  void clear() {
    TypenameLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
/// Data used for parsing list of variables in OpenMP clauses.
/// Collects everything ParseOpenMPVarList discovers besides the variable
/// expressions themselves (modifiers, kinds, and their source locations).
struct OpenMPVarListDataTy {
  Expr *TailExpr = nullptr;          // trailing expr (e.g. linear step, alignment)
  SourceLocation ColonLoc;           // ':' separating modifiers from the list
  SourceLocation RLoc;               // closing ')' of the clause
  CXXScopeSpec ReductionOrMapperIdScopeSpec; // qualifier of reduction/mapper id
  DeclarationNameInfo ReductionOrMapperId;   // reduction operator or mapper name
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; // 'depend' dependence kind
  OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;     // 'linear' modifier kind
  SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
      MapTypeModifiers;              // map-type-modifiers (always/close/mapper)
  SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
      MapTypeModifiersLoc;           // matching locations, parallel array
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown; // map type (to/from/tofrom/...)
  bool IsMapTypeImplicit = false;    // true when map type was not written
  SourceLocation DepLinMapLoc;       // location of depend/linear/map modifier
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
main.c | //
// main.c
// N-Body
//
// Authors:
// Emanuele Del Sozzo (emanuele.delsozzo@polimi.it), Lorenzo Di Tucci (lorenzo.ditucci@polimi.it),
// Marco Rabozzi (marco.rabozzi@polimi.it), Marco Nanni (marco3.nanni@mail.polimi.it)
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include <omp.h>
#include "support.h"
#include "parser.h"
/**
* \brief Count the number of lines in a file
* \param [in] fp File pointer
* \return The number of lines
*/
/**
 * \brief Count the number of lines in a file
 *
 * A "line" is a '\n'-terminated run of characters; a trailing run with no
 * final newline still counts as one line (matching `wc -l` + partial line).
 *
 * \param [in] fp File pointer, read from its current position to EOF
 * \return The number of lines
 */
static int count_lines(FILE *fp)
{
    int lines = 0;
    int partial = 0; /* set while characters have been seen since the last '\n' */
    int c;
    while ((c = getc(fp)) != EOF) {
        if (c == '\n') {
            lines++;
            partial = 0;
        } else {
            partial = 1;
        }
    }
    return lines + partial;
}
/* Advance every particle one explicit-Euler step: position moves by the
 * current velocity first, then the velocity absorbs the acceleration a[k]
 * accumulated this timestep. Each particle is independent, so the loop is
 * trivially parallel. */
void final_computation(particle_t * p, coord3d_t *a, int N){
    #pragma omp parallel for
    for (int k = 0; k < N; k++) {
        particle_t *pk = &p[k];
        /* position += old velocity */
        pk->p.x += pk->v.x;
        pk->p.y += pk->v.y;
        pk->p.z += pk->v.z;
        /* velocity += acceleration */
        pk->v.x += a[k].x;
        pk->v.y += a[k].y;
        pk->v.z += a[k].z;
    }
}
/* Accumulate softened gravitational accelerations into a[] with the O(N^2)
 * all-pairs scheme. The i == k self term is not skipped on purpose: its
 * displacement is zero, so with the EPS softening it contributes exactly 0.
 * Each outer iteration touches only a[i], so the loop parallelizes cleanly. */
void central_computation(particle_t * p, coord3d_t *a, int N, float EPS, const float *m){
    #pragma omp parallel for
    for (int i = 0; i < N; i++) {
        float ax = a[i].x;
        float ay = a[i].y;
        float az = a[i].z;
        for (int k = 0; k < N; k++) {
            float dx = p[k].p.x - p[i].p.x;
            float dy = p[k].p.y - p[i].p.y;
            float dz = p[k].p.z - p[i].p.z;
            /* softened squared distance; EPS keeps the kernel finite */
            float r2 = dx*dx + dy*dy + dz*dz + EPS;
            float inv_r3 = 1/ (r2*sqrtf(r2));
            float w = m[k] * inv_r3;
            ax += dx * w;
            ay += dy * w;
            az += dz * w;
        }
        a[i].x = ax;
        a[i].y = ay;
        a[i].z = az;
    }
}
/* Produce the initial particle state, either pseudo-randomly (fixed seed, so
 * runs are reproducible) or from a CSV file "m,x,y,z,vx,vy,vz".
 *
 * N          requested particle count.
 *            NOTE(review): N is received by value, so when the input file has
 *            fewer rows than requested the reduced count computed here never
 *            reaches the caller, which keeps sizing buffers with its own N —
 *            TODO confirm and consider passing N by pointer.
 * particles  out: calloc'd array of particles; caller frees.
 * m          out: calloc'd array of masses; caller frees.
 * args_info  parsed command-line options (mode, file name, limits).
 *
 * Exits the process on usage errors, I/O errors, or allocation failure.
 */
void data_generation(int N, particle_t **particles, float **m, params_t args_info){
    if (!args_info.random && !args_info.file) {
        print_usage();
        exit(EXIT_FAILURE);
    }
    if (args_info.random) {
        *particles = calloc(N, sizeof(particle_t));
        *m = calloc(N, sizeof(float));
        if (*particles == NULL || *m == NULL) {
            fprintf(stderr, "Out of memory allocating %d particles\n", N);
            exit(EXIT_FAILURE);
        }
        srand(100); /* fixed seed: deterministic test data */
        for (int i = 0; i < N; i++)
        {
            (*m)[i] = (float)rand()/100000;
            (*particles)[i].p.x = (float)rand()/100000;
            (*particles)[i].p.y = (float)rand()/100000;
            (*particles)[i].p.z = (float)rand()/100000;
            (*particles)[i].v.x = (float)rand()/100000;
            (*particles)[i].v.y = (float)rand()/100000;
            (*particles)[i].v.z = (float)rand()/100000;
        }
    } else {
        const char *filename = args_info.file_name;
        FILE *fp = fopen(args_info.file_name, "r");
        if (fp == NULL) {
            fprintf(stderr, "Failed to open input file: `%s'\n", filename);
            exit(EXIT_FAILURE);
        }
        N = count_lines(fp) - 1; /* minus the CSV header row */
        if (args_info.num_particles < N) {
            N = args_info.num_particles;
        }
        *particles = calloc(N, sizeof(particle_t));
        *m = calloc(N, sizeof(float));
        if (*particles == NULL || *m == NULL) {
            fprintf(stderr, "Out of memory allocating %d particles\n", N);
            fclose(fp);
            exit(EXIT_FAILURE);
        }
        rewind(fp);
        /* consume the header; EOF here means the file was emptied under us */
        if (fscanf(fp, "m,x,y,z,vx,vy,vz\n") == EOF) {
            fprintf(stderr, "Missing CSV header in `%s'\n", filename);
            fclose(fp);
            exit(EXIT_FAILURE);
        }
        for (int i = 0; i < N; i++) {
            if (fscanf(fp, "%g,%g,%g,%g,%g,%g,%g", &((*m)[i]),
                &((*particles)[i]).p.x, &((*particles)[i]).p.y, &((*particles)[i]).p.z,
                &((*particles)[i]).v.x, &((*particles)[i]).v.y, &((*particles)[i]).v.z) != 7) {
                fprintf(stderr, "Malformed particle data at row %d of `%s'\n",
                        i + 1, filename);
                fclose(fp);
                exit(EXIT_FAILURE);
            }
        }
        fclose(fp);
    }
}
/**
* \brief Run the N-body simulation on the CPU.
* \param [in] N Number of particles
* \param [in] nt Number of time-steps
* \param [in] EPS Damping factor
* \param [in] m Masses of the N particles
* \param [in] in_particles Initial state of the N particles
* \param [out] out_particles Final state of the N particles after nt time-steps
* \param [out] time Execution time
*/
/**
 * \brief Run the N-body simulation on the CPU.
 * \param [in] N Number of particles
 * \param [in] nt Number of time-steps
 * \param [in] EPS Damping factor
 * \param [in] m Masses of the N particles
 * \param [in] in_particles Initial state of the N particles
 * \param [out] out_particles Final state of the N particles after nt time-steps
 * \param [out] time Execution time
 *
 * NOTE(review): *time reports only the central_computation duration of the
 * LAST timestep (this preserves the original behavior); it is not the total
 * simulation wall time — TODO confirm that is the intended metric.
 */
void run_cpu(int N, int nt, float EPS, const float *m,
        const particle_t *in_particles, particle_t *out_particles,
        double *time)
{
    particle_t *p = malloc(N * sizeof *p);
    coord3d_t *a = malloc(N * sizeof *a);
    if (p == NULL || a == NULL) {
        fprintf(stderr, "Out of memory in run_cpu (N=%d)\n", N);
        free(p);
        free(a);
        exit(EXIT_FAILURE);
    }
    memcpy(p, in_particles, N * sizeof(particle_t));
    double time_it_start = 0.0, time_it_end = 0.0;
    /* label kept: presumably used by HLS-style tooling to name the loop —
       TODO confirm it is still needed */
    outer_loop:for (int t = 0; t < nt; t++) {
        memset(a, 0, N * sizeof(coord3d_t)); /* accelerations rebuilt each step */
        time_it_start = get_time();
        central_computation(p, a, N, EPS, m);
        time_it_end = get_time();
        final_computation(p, a, N);
    }
    *time = time_it_end - time_it_start;
    memcpy(out_particles, p, N * sizeof(particle_t));
    free(p);
    free(a);
}
/* Entry point: parse options, build the initial particle set, run the CPU
 * simulation, and report the measured time. Exits non-zero on any setup
 * failure. */
int main(int argc, char **argv)
{
    params_t args_info;
    if (parse_input(argc, argv, &args_info) != 0) {
        exit(EXIT_FAILURE);
    }
    int N = args_info.num_particles;
    int nt = args_info.num_timesteps;
    float EPS = args_info.EPS;
    /* EPS softens every squared distance; zero would divide by zero on the
       self-interaction term in central_computation. */
    if (EPS == 0) {
        fprintf(stderr, "EPS cannot be set to zero\n");
        exit(EXIT_FAILURE);
    }
    particle_t *particles;
    float *m;
    /* NOTE(review): when reading from a file, data_generation may allocate
       fewer than N particles internally, but N here is unchanged (passed by
       value) — the buffers below are sized with the original N. TODO confirm
       and thread the effective count back out of data_generation. */
    data_generation(N, &particles, &m, args_info);
    double cpuTime = 0;
    particle_t *cpu_particles = malloc(N * sizeof *cpu_particles);
    if (cpu_particles == NULL) {
        fprintf(stderr, "Out of memory allocating result buffer\n");
        exit(EXIT_FAILURE);
    }
    puts("Running on CPU...\n");
    run_cpu(N, nt, EPS, m, particles, cpu_particles, &cpuTime);
    printf("CPU execution time: %.3gs\n", cpuTime);
    free_params_t(&args_info);
    free(particles);
    free(m);
    free(cpu_particles);
    return 0;
}
|
example3.c | #include "multi_fbeam.h"
#include <sys/stat.h>
#include <errno.h>
#include <png.h>
typedef struct image_data{
char dir_name[64]; // directory name to output image
int scale; // number for enlarge the output image
int m; // sampling number
double rang; // range of sampling
int ts; // time step per cycle
double complex *ve,*vh; // electromagnetic field data
double me[3],mh[3]; // maximum amplitude of each field component
}IMD;
void make_directory(char *dir_name);
void eh_field_x(IMD *id,Bobj *bm);
void eh_field_y(IMD *id,Bobj *bm);
void eh_field_z(IMD *id,Bobj *bm);
void output_field(char *pl,IMD *id,Bobj *bm);
// color table
png_byte ct1[9][3]={{0x00,0x00,0x90},{0x00,0x0f,0xff},{0x00,0x90,0xff},{0x0f,0xff,0xee},
{0xff,0xff,0xff},{0xff,0xee,0x00},{0xff,0x70,0x00},{0xee,0x00,0x00},{0x7f,0x00,0x00}};
/*
png_byte ct1[9][3]={{0x00,0x00,0x90},{0x00,0x0f,0xff},{0x00,0x90,0xff},{0x0f,0xff,0xee},
{0x90,0xff,0x70},{0xff,0xee,0x00},{0xff,0x70,0x00},{0xee,0x00,0x00},{0x7f,0x00,0x00}};
*/
/* Entry point: set up the multi-focused-beam model, sample the E/H field on
 * the three coordinate planes through the origin, and write one PNG sequence
 * (plus an info file) per plane into id.dir_name. */
int main()
{
  Bobj bm;
  IMD id;
  init_mfb(&bm);       // initialize the beam object
  read_data_mfb(&bm);  // read beam configuration (presumably from a data file
                       // — defined in multi_fbeam.h; TODO confirm source)
  print_data_mfb(&bm);
  setup_mfb(&bm);
  sprintf(id.dir_name,"images"); // directory name to output image
  id.scale=1;                    // number for enlarge the output image
  id.m=200;                      // sampling number (id.m x id.m grid)
  id.rang=4.0*bm.lambda_0;       // range of sampling (half-width of the plane)
  id.ts=40;                      // time step per cycle
  make_directory(id.dir_name);
  // one complex triple (Ex,Ey,Ez)/(Hx,Hy,Hz) per grid point
  id.ve=(double complex *)m_alloc2(id.m*id.m*3,sizeof(double complex),"example3.c, ve");
  id.vh=(double complex *)m_alloc2(id.m*id.m*3,sizeof(double complex),"example3.c, vh");
  // x=0 plane
  eh_field_x(&id,&bm);
  output_field("yz",&id,&bm);
  // y=0 plane
  eh_field_y(&id,&bm);
  output_field("xz",&id,&bm);
  // z=0 plane
  eh_field_z(&id,&bm);
  output_field("xy",&id,&bm);
  free(id.ve);
  free(id.vh);
  free_mfb(&bm);
  return 0;
}
/* Create dir_name with mode u=rwx,g=rwx. A directory that already exists is
 * treated as success; any other mkdir failure terminates the program. */
void make_directory(char *dir_name)
{
  if(mkdir(dir_name,S_IRWXU|S_IRWXG)!=0){
    if(errno==EEXIST) return; // already there: nothing to do
    printf("failed to make directory. Exit..");
    exit(1);
  }
}
/* Sample E and H on the x=0 plane over an id->m x id->m grid spanning
 * [-rang, rang] in y and z, storing the complex field triples in id->ve /
 * id->vh (row i = decreasing z, column j = increasing y) and tracking the
 * per-component amplitude maxima in id->me / id->mh. */
void eh_field_x(IMD *id,Bobj *bm)
{
  double complex e[3],h[3];
  double x[3],dr;
  int i,j,d;
  dr=id->rang*2.0/(double)(id->m-1); // grid spacing; shared, read-only below
  for(i=0;i<3;i++){
    id->me[i]=0.0;
    id->mh[i]=0.0;
  }
  // x=0 plane
  x[0]=0.0;
  // x is firstprivate so each thread gets its own copy with x[0] preset;
  // e/h/j/d are private scratch. Rows (i) are independent except for the
  // max-tracking, which is serialized by the critical section.
  #pragma omp parallel for schedule(dynamic) firstprivate(x) private(j,d,e,h)
  for(i=0;i<id->m;i++){
    x[2]=id->rang-(double)i*dr;
    for(j=0;j<id->m;j++){
      x[1]=-id->rang+(double)j*dr;
      calc_mfb_EH(e,h,x,bm); // evaluate the beam field at point x
      // critical guards only this max-reduction on the shared me/mh arrays
      #pragma omp critical
      for(d=0;d<3;d++){
        if(cabs(e[d])>id->me[d]) id->me[d]=cabs(e[d]);
        if(cabs(h[d])>id->mh[d]) id->mh[d]=cabs(h[d]);
      }
      // each (i,j) writes a distinct slot, so no synchronization needed here
      for(d=0;d<3;d++){
        id->ve[i*id->m*3+j*3+d]=e[d];
        id->vh[i*id->m*3+j*3+d]=h[d];
      }
    }
  }
}
/* Sample E and H on the y=0 plane (row i = decreasing z, column j =
 * increasing x); otherwise identical in structure to eh_field_x: fields go
 * to id->ve / id->vh, amplitude maxima to id->me / id->mh. */
void eh_field_y(IMD *id,Bobj *bm)
{
  double complex e[3],h[3];
  double x[3],dr;
  int i,j,d;
  dr=id->rang*2.0/(double)(id->m-1); // grid spacing; shared, read-only below
  for(i=0;i<3;i++){
    id->me[i]=0.0;
    id->mh[i]=0.0;
  }
  // y=0 plane
  x[1]=0.0;
  // firstprivate x carries the preset x[1]=0 into every thread's copy
  #pragma omp parallel for schedule(dynamic) firstprivate(x) private(j,d,e,h)
  for(i=0;i<id->m;i++){
    x[2]=id->rang-(double)i*dr;
    for(j=0;j<id->m;j++){
      x[0]=-id->rang+(double)j*dr;
      calc_mfb_EH(e,h,x,bm); // evaluate the beam field at point x
      // serialize the shared max-reduction only
      #pragma omp critical
      for(d=0;d<3;d++){
        if(cabs(e[d])>id->me[d]) id->me[d]=cabs(e[d]);
        if(cabs(h[d])>id->mh[d]) id->mh[d]=cabs(h[d]);
      }
      for(d=0;d<3;d++){
        id->ve[i*id->m*3+j*3+d]=e[d];
        id->vh[i*id->m*3+j*3+d]=h[d];
      }
    }
  }
}
/* Sample E and H on the z=0 plane (row i = decreasing y, column j =
 * increasing x); same structure and output layout as eh_field_x/_y. */
void eh_field_z(IMD *id,Bobj *bm)
{
  double complex e[3],h[3];
  double x[3],dr;
  int i,j,d;
  dr=id->rang*2.0/(double)(id->m-1); // grid spacing; shared, read-only below
  for(i=0;i<3;i++){
    id->me[i]=0.0;
    id->mh[i]=0.0;
  }
  // z=0 plane
  x[2]=0.0;
  // firstprivate x carries the preset x[2]=0 into every thread's copy
  #pragma omp parallel for schedule(dynamic) firstprivate(x) private(j,d,e,h)
  for(i=0;i<id->m;i++){
    x[1]=id->rang-(double)i*dr;
    for(j=0;j<id->m;j++){
      x[0]=-id->rang+(double)j*dr;
      calc_mfb_EH(e,h,x,bm); // evaluate the beam field at point x
      // serialize the shared max-reduction only
      #pragma omp critical
      for(d=0;d<3;d++){
        if(cabs(e[d])>id->me[d]) id->me[d]=cabs(e[d]);
        if(cabs(h[d])>id->mh[d]) id->mh[d]=cabs(h[d]);
      }
      for(d=0;d<3;d++){
        id->ve[i*id->m*3+j*3+d]=e[d];
        id->vh[i*id->m*3+j*3+d]=h[d];
      }
    }
  }
}
/* Render one full time cycle of the previously sampled field for plane `pl`
 * ("yz"/"xz"/"xy"): one PNG per time step, an info file listing the color-bar
 * ranges, and the color-bar image itself. */
void output_field(char *pl,IMD *id,Bobj *bm)
{
  void output_png(int nt,double complex cet,char *pl,IMD *id);
  void output_color_bar(IMD *id);
  FILE *fp;
  char fn[128];
  double dt;
  int n;
  dt=bm->lambda_0/(double)id->ts; // one cycle split into ts frames
  // frames are independent (each output_png call uses its own libpng
  // structs and writes distinct files), so they render in parallel;
  // NOTE(review): assumes output_png touches no shared mutable state —
  // it only reads id — TODO confirm.
  #pragma omp parallel for schedule(dynamic)
  for(n=0;n<id->ts;n++){
    // cet = exp(-i*omega*t) animates the phase of the stored complex field
    output_png(n,cexp(-I*bm->omega*dt*(double)n),pl,id);
  }
  // print info
  sprintf(fn,"%s/%s_info.txt",id->dir_name,pl);
  fp=fopen(fn,"wt");
  if(fp==NULL){
    printf("Failed to open the %s file. Exit...\n",fn);
    exit(1);
  }
  fprintf(fp,"the range of color bar\n");
  fprintf(fp,"Ex is %8e to %8e\n",-id->me[0],id->me[0]);
  fprintf(fp,"Ey is %8e to %8e\n",-id->me[1],id->me[1]);
  fprintf(fp,"Ez is %8e to %8e\n",-id->me[2],id->me[2]);
  fprintf(fp,"Hx is %8e to %8e\n",-id->mh[0],id->mh[0]);
  fprintf(fp,"Hy is %8e to %8e\n",-id->mh[1],id->mh[1]);
  fprintf(fp,"Hz is %8e to %8e\n",-id->mh[2],id->mh[2]);
  fclose(fp);
  // output color bar image
  output_color_bar(id);
}
/* Write frame nt for plane `pl`: six PNGs (Ex,Ey,Ez,Hx,Hy,Hz), each the real
 * part of the stored complex field rotated by phase factor cet, normalized by
 * the per-component maximum and mapped through color_rgb. Each sample is
 * enlarged to a (scale+1) x (scale+1) pixel block. All libpng state is local
 * to this call, so concurrent invocations for different nt do not share
 * writer structs. */
void output_png(int nt,double complex cet,char *pl,IMD *id)
{
  int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b); // -1 <= x <= 1
  FILE *fep[3],*fhp[3];
  char fname[256],sf[3]={'x','y','z'};
  int j,i,sj,si,d,m,scale;
  png_uint_32 width,height;
  png_structp png_e[3],png_h[3];
  png_infop info_e[3],info_h[3];
  png_bytepp pd_e[3],pd_h[3];
  png_byte r,g,b;
  m=id->m;
  scale=id->scale;
  width =m*(scale+1);
  height=m*(scale+1);
  // per-component setup: create writer + info structs, open the output
  // files, and allocate one row array per image (E and H separately)
  for(d=0;d<3;d++){
    png_e[d] =png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    info_e[d]=png_create_info_struct(png_e[d]);
    sprintf(fname,"%s/%s_E%c_%03d.png",id->dir_name,pl,sf[d],nt);
    fep[d]=fopen(fname,"wb");
    if(fep[d]==NULL){
      printf("Failed to open the %s file. Exit...\n",fname);
      exit(1);
    }
    png_h[d] =png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    info_h[d]=png_create_info_struct(png_h[d]);
    sprintf(fname,"%s/%s_H%c_%03d.png",id->dir_name,pl,sf[d],nt);
    fhp[d]=fopen(fname,"wb");
    if(fhp[d]==NULL){
      printf("Failed to open the %s file. Exit...\n",fname);
      exit(1);
    }
    png_init_io(png_e[d],fep[d]);
    png_set_IHDR(png_e[d],info_e[d],width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT);
    pd_e[d]=(png_bytepp)png_malloc(png_e[d],sizeof(png_bytep)*height);
    png_set_rows(png_e[d],info_e[d],pd_e[d]);
    png_init_io(png_h[d],fhp[d]);
    png_set_IHDR(png_h[d],info_h[d],width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT);
    pd_h[d]=(png_bytepp)png_malloc(png_h[d],sizeof(png_bytep)*height);
    png_set_rows(png_h[d],info_h[d],pd_h[d]);
    for(j=0;j<height;j++){
      pd_e[d][j]=(png_bytep)png_malloc(png_e[d],sizeof(png_byte)*width*3);
      pd_h[d][j]=(png_bytep)png_malloc(png_h[d],sizeof(png_byte)*width*3);
    }
  }
  // rasterize: one color per field sample, replicated over its pixel block
  for(i=0;i<m;i++){
    for(j=0;j<m;j++){
      for(d=0;d<3;d++){
        // phase-rotate, take the real part, normalize to [-1,1] by the max
        color_rgb(creal(cet*id->ve[i*m*3+j*3+d])/id->me[d],&r,&g,&b);
        for(si=0;si<=scale;si++){
          for(sj=0;sj<=scale;sj++){
            pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+0]=r;
            pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+1]=g;
            pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+2]=b;
          }
        }
        color_rgb(creal(cet*id->vh[i*m*3+j*3+d])/id->mh[d],&r,&g,&b);
        for(si=0;si<=scale;si++){
          for(sj=0;sj<=scale;sj++){
            pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+0]=r;
            pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+1]=g;
            pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+2]=b;
          }
        }
      }
    }
  }
  // flush the images and release every row, the row arrays, and the files
  for(d=0;d<3;d++){
    png_write_png(png_e[d],info_e[d],PNG_TRANSFORM_IDENTITY,NULL);
    png_write_png(png_h[d],info_h[d],PNG_TRANSFORM_IDENTITY,NULL);
    for(j=0;j<height;j++){
      png_free(png_e[d],pd_e[d][j]);
      png_free(png_h[d],pd_h[d][j]);
    }
    png_free(png_e[d],pd_e[d]);
    png_free(png_h[d],pd_h[d]);
    fclose(fep[d]);
    fclose(fhp[d]);
  }
}
/* Write color_bar.png: a vertical gradient running the full color table from
 * +1 (top) to -1 (bottom), sized to match the field images' height with a
 * width of height/16. */
void output_color_bar(IMD *id)
{
  int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b); // -1 <= x <= 1
  FILE *fp;
  char fname[128];
  int j,i;
  png_uint_32 width,height;
  png_structp png;
  png_infop info;
  png_bytepp pdata;
  png_byte r,g,b;
  sprintf(fname,"%s/color_bar.png",id->dir_name);
  height=id->m*(id->scale+1); // same height as the field images
  width=height/16;
  png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
  info= png_create_info_struct(png);
  fp=fopen(fname,"wb");
  if(fp==NULL){
    printf("Failed to open the %s file. Exit...\n",fname);
    exit(1);
  }
  png_init_io(png, fp);
  png_set_IHDR(png,info,width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE,
               PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT);
  pdata=(png_bytepp)png_malloc(png, sizeof(png_bytep)*height);
  png_set_rows(png,info,pdata);
  for(j=0;j<height;j++){
    pdata[j]=(png_bytep)png_malloc(png,sizeof(png_byte)*width*3);
  }
  // each row gets one color; x sweeps linearly from 1.0 down to just above -1
  for(i=0;i<height;i++){
    color_rgb(1.0-(2.0/(double)height)*(double)i,&r,&g,&b);
    for(j=0;j<width;j++){
      pdata[i][j*3+0]=r;
      pdata[i][j*3+1]=g;
      pdata[i][j*3+2]=b;
    }
  }
  png_write_png(png, info, PNG_TRANSFORM_IDENTITY, NULL);
  // release rows, the row array, and the file
  for(j=0;j<height;j++){
    png_free(png,pdata[j]);
  }
  png_free(png,pdata);
  fclose(fp);
}
/* Map x in [-1,1] to an RGB color by linear interpolation across the 9-entry
 * table ct1 (8 segments of 2^21 steps each over a 2^24-step domain).
 * Out-of-range input yields black and returns -1; otherwise returns 0. */
int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b) // -1 <= x <= 1
{
  if(x<-1.0 || x>1.0){
    *r=0x00; *g=0x00; *b=0x00;
    return -1;
  }
  unsigned int n=(unsigned int)floor(pow(2,23)*(x+1.0)); // position in [0, 2^24]
  unsigned int nc=(unsigned int)pow(2,21);               // steps per segment
  unsigned int seg=n/nc;                                 // segment index 0..8
  if(seg>=8){
    // x == 1.0 lands exactly on the last table entry
    *r=ct1[8][0]; *g=ct1[8][1]; *b=ct1[8][2];
    return 0;
  }
  unsigned int i=seg+1;        // upper table entry of this segment
  unsigned int nd=n-nc*seg;    // offset within the segment
  double i_nc=1.0/(double)nc;
  // per-step deltas between the two bounding table entries
  double dr=(double)(ct1[i][0]-ct1[i-1][0])*i_nc;
  double dg=(double)(ct1[i][1]-ct1[i-1][1])*i_nc;
  double db=(double)(ct1[i][2]-ct1[i-1][2])*i_nc;
  *r=(png_byte)floor((double)ct1[i-1][0]+dr*(double)nd);
  *g=(png_byte)floor((double)ct1[i-1][1]+dg*(double)nd);
  *b=(png_byte)floor((double)ct1[i-1][2]+db*(double)nd);
  return 0;
}
|
GB_binop__isgt_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint16)
// A*D function (colscale): GB (_AxD__isgt_uint16)
// D*A function (rowscale): GB (_DxB__isgt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint16)
// C=scalar+B GB (_bind1st__isgt_uint16)
// C=scalar+B' GB (_bind1st_tran__isgt_uint16)
// C=A+scalar GB (_bind2nd__isgt_uint16)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT16 || GxB_NO_ISGT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated kernel: C = A "+" B where the "+" is the ISGT operator
// (z = (x > y), see GB_BINOP above) and C, A, B are all dense.
// The loop body is supplied by the included template; the GB() macro
// (defined elsewhere) presumably mangles the exported symbol name.
void GB (_Cdense_ewise3_noaccum__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: accumulate sparse B into dense C with the ISGT operator
// as accumulator.  B_ek_slicing / B_ntasks / B_nthreads describe the parallel
// task partition of B (computed by the caller).
GrB_Info GB (_Cdense_accumB__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // loop body supplied by the template
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: accumulate the scalar *p_bwork (a uint16_t) into every
// entry of the dense matrix C, using the ISGT operator as accumulator.
GrB_Info GB (_Cdense_accumb__isgt_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (harmless generated code)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: scale each column j of A by the diagonal entry D(j,j),
// applying the ISGT operator (z = (x > y)) as the "multiply".
GrB_Info GB (_AxD__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: scale each row i of B by the diagonal entry D(i,i),
// applying the ISGT operator (z = (x > y)) as the "multiply".
GrB_Info GB (_DxB__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Generated kernel: element-wise "add" over the set union of the patterns of
// A and B, using the ISGT operator (z = (x > y)), with an optional mask M
// (structural or valued, complemented or not).  When is_eWiseUnion is true
// the alpha/beta scalars are unwrapped below (presumably substituting for
// entries missing from A or B, per GxB_eWiseUnion); otherwise they are
// never read.
GrB_Info GB (_AaddB__isgt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here; released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Generated kernel: element-wise "multiply" (ISGT operator, z = (x > y))
// over the intersection of the patterns of A and B; C is sparse/hypersparse,
// with an optional (possibly complemented) mask M.
GrB_Info GB (_AemultB_08__isgt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: element-wise multiply where A is sparse/hypersparse and
// B is bitmap/full.  GB_BINOP_FLIP is defined as 0 for this operator (see
// the macro section above), so only the non-flipped #else branch below is
// compiled and the flipxy argument is effectively unused here.
GrB_Info GB (_AemultB_02__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: masked element-wise multiply where the mask M is
// sparse/hypersparse and both A and B are bitmap/full.
GrB_Info GB (_AemultB_04__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: element-wise multiply producing a bitmap-format C,
// with an optional (possibly complemented) mask M.
GrB_Info GB (_AemultB_bitmap__isgt_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Compute Cx [p] = (x > Bx [p]) for every entry present in B, where x is a
// scalar bound to the first operand.  Entries absent from the bitmap Bb
// are skipped.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__isgt_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *out = (uint16_t *) Cx_output ;
    uint16_t scalar = (*((uint16_t *) x_input)) ;
    uint16_t *in = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            uint16_t bij = GBX (in, p, false) ;
            out [p] = (scalar > bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Compute Cx [p] = (Ax [p] > y) for every entry present in A, where y is a
// scalar bound to the second operand.  Entries absent from the bitmap Ab
// are skipped.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__isgt_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *out = (uint16_t *) Cx_output ;
    uint16_t *in = (uint16_t *) Ax_input ;
    uint16_t scalar = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            uint16_t aij = GBX (in, p, false) ;
            out [p] = (aij > scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x > aij) ; \
}
// Generated kernel: C = (x > A') -- transpose A while applying the ISGT
// operator with the scalar x bound to the first operand.  The transpose
// loop is supplied by GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__isgt_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code after this function: preprocessor
    // directives take effect at compile time even though both branches
    // above already returned
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij > y) ; \
}
// Generated kernel: C = (A' > y) -- transpose A while applying the ISGT
// operator with the scalar y bound to the second operand.  The transpose
// loop is supplied by GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
quadrature_omp.c | #include "quadrature.h"
#include <omp.h>
double trapezoid(double (*fp)(double), double a, double b, int N)
{
double h = (b-a)/(N-1);
double f_a, f_b, trapezoid = 0.0;
// Perform the trapezoid rule
#pragma omp parallel for reduction(+: trapezoid)
for (int i = 0; i < N; ++i)
{
if (i == 0)
{
f_a = (*fp)(a);
trapezoid += f_a;
}
else if (i == N-1)
{
f_b = (*fp)(a + h*i);
trapezoid += f_b;
}
else
{
trapezoid += (*fp)(a + h*i);
}
}
trapezoid = h*(trapezoid) - 0.5*h*(f_a + f_b);
return trapezoid;
}
/* Print a convergence table for the trapezoid rule: for each grid size in
 * nvals, the approximation, its absolute error against int_true, and the
 * ratio of the previous row's error to the current one (0 on the first row,
 * since the "previous" error starts at 0). */
void error_table(double (*fp)(double), double a, double b,
                 int nrows, int nvals[], double int_true)
{
    printf("%8s %22s %13s %13s\n", "n", "trapezoid", "error", "ratio");
    double prev_err = 0.0;
    for (int row = 0; row < nrows; ++row)
    {
        double approx = trapezoid(fp, a, b, nvals[row]);
        double err = fabs(approx - int_true);
        double ratio = prev_err / err;
        prev_err = err;   // remembered for the next row's ratio
        printf("%8d %22.14e %13.3e %13.3e\n", nvals[row], approx, err, ratio);
    }
}
|
eaw-experimental.c | #include "eaw-experimental.h"
#include "libdwt.h"
#include "inline.h"
#include <assert.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <limits.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
 * @brief Copy @p n floats from @p src to @p dst.
 *
 * Both areas may be sparse: consecutive elements are @p stride_src /
 * @p stride_dst bytes apart.  When both strides equal sizeof(float) the
 * copy degenerates to a single memcpy.
 *
 * @returns A pointer to @p dst.
 */
static
void *dwt_util_memcpy_stride_s(
    void *restrict dst,
    ssize_t stride_dst,
    const void *restrict src,
    ssize_t stride_src,
    size_t n ///< Number of floats to be copied, not number of bytes.
)
{
    assert( NULL != dst && NULL != src );
    const ssize_t contig = (ssize_t) sizeof(float);
    if( stride_dst != contig || stride_src != contig )
    {
        // general strided case: walk both areas in byte steps
        char *restrict d = (char *restrict) dst;
        const char *restrict s = (const char *restrict) src;
        for(size_t left = n; left != 0; left--)
        {
            *(float *restrict) d = *(const float *restrict) s;
            d += stride_dst;
            s += stride_src;
        }
    }
    else
    {
        // both areas are contiguous: one bulk copy
        memcpy(dst, src, n * sizeof(float));
    }
    return dst;
}
static
float dwt_eaw_w(float n, float m, float alpha)
{
const float eps = 1.0e-5f;
return 1.f / (powf(fabsf(n-m), alpha) + eps);
}
static
void dwt_calc_eaw_w(float *w, float *arr, int N, float alpha)
{
for(int i = 0; i < N-1; i++)
{
w[i] = dwt_eaw_w(arr[i], arr[i+1], alpha);
}
w[N-1] = 0.f; // not necessary
}
/**
 * One forward level of the edge-avoiding CDF 9/7 wavelet transform (single
 * precision) on a strided signal of N samples.
 *
 * The samples are copied into the contiguous scratch buffer tmp, the
 * data-dependent lifting weights are computed into w (N floats; w[i] links
 * samples i and i+1, w[N-1] is padding), then four weighted lifting passes
 * (predict 1, update 1, predict 2, update 2) and a final scaling are applied
 * in place.  Even samples (low-pass) go to dst_l, odd samples (high-pass)
 * to dst_h, both strided.  The weights in w are an OUTPUT and are required
 * later by dwt_eaw97_i_ex_stride_s to invert this level.
 */
void dwt_eaw97_f_ex_stride_s(
    const float *src,
    float *dst_l,
    float *dst_h,
    float *tmp,
    int N,
    int stride,
    float *w,
    float alpha
)
{
    assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride );
    // fix for small N: a single sample is pure low-pass, just scale it
    if(N < 2)
    {
        if(1 == N)
            dst_l[0] = src[0] * dwt_cdf97_s1_s;
        return;
    }
    // copy src into tmp (contiguous working copy)
    dwt_util_memcpy_stride_s(tmp, sizeof(float), src, stride, N);
    // per-edge weights derived from the signal itself
    dwt_calc_eaw_w(w, tmp, N, alpha);
    // predict 1 + update 1
    // odd samples: weighted prediction from the two even neighbours
    for(int i=1; i<N-2+(N&1); i+=2)
    {
        float wL = w[i-1];
        float wR = w[i+0];
        tmp[i] -= (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR) * (2.f*dwt_cdf97_p1_s);
    }
    // right boundary: only a left neighbour exists, so the weight and the
    // neighbour tmp[N-2] are mirrored; the step kind depends on parity
    if( is_odd(N) )
    {
        float wL = w[N-2];
        float wR = w[N-2];
        tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_u1_s);
    }
    else
    {
        float wL = w[N-2];
        float wR = w[N-2];
        tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_p1_s);
    }
    // left boundary: sample 0 mirrors its single right neighbour
    {
        float wL = w[0];
        float wR = w[0];
        tmp[0] += (wL * tmp[1] + wR * tmp[1]) / (wL+wR) * (2.f*dwt_cdf97_u1_s);
    }
    // interior even samples: weighted update from the odd neighbours
    for(int i=2; i<N-(N&1); i+=2)
    {
        float wL = w[i-1];
        float wR = w[i+0];
        tmp[i] += (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR) * (2.f*dwt_cdf97_u1_s);
    }
    // predict 2 + update 2 (same structure, second pair of coefficients)
    for(int i=1; i<N-2+(N&1); i+=2)
    {
        float wL = w[i-1];
        float wR = w[i+0];
        tmp[i] -= (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR) * (2.f*dwt_cdf97_p2_s);
    }
    if( is_odd(N) )
    {
        float wL = w[N-2];
        float wR = w[N-2];
        tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_u2_s);
    }
    else
    {
        float wL = w[N-2];
        float wR = w[N-2];
        tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_p2_s);
    }
    {
        float wL = w[0];
        float wR = w[0];
        tmp[0] += (wL * tmp[1] + wR * tmp[1]) / (wL+wR) * (2.f*dwt_cdf97_u2_s);
    }
    for(int i=2; i<N-(N&1); i+=2)
    {
        float wL = w[i-1];
        float wR = w[i+0];
        tmp[i] += (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR) * (2.f*dwt_cdf97_u2_s);
    }
    // scale: even samples by s1, odd samples by s2
    for(int i=0; i<N; i+=2)
        tmp[i] = tmp[i] * dwt_cdf97_s1_s;
    for(int i=1; i<N; i+=2)
        tmp[i] = tmp[i] * dwt_cdf97_s2_s;
    // copy tmp into dst: even -> dst_l (ceil(N/2)), odd -> dst_h (floor(N/2))
    dwt_util_memcpy_stride_s(dst_l, stride, tmp+0, 2*sizeof(float), ceil_div2(N));
    dwt_util_memcpy_stride_s(dst_h, stride, tmp+1, 2*sizeof(float), floor_div2(N));
}
/**
 * One inverse level of the edge-avoiding CDF 9/7 wavelet transform (single
 * precision) on a strided signal of N samples.
 *
 * Re-interleaves the low-pass (src_l, even slots) and high-pass (src_h, odd
 * slots) subbands into tmp, undoes the scaling, then runs the four lifting
 * passes of the forward transform in reverse order with opposite signs,
 * using the SAME weight array w that dwt_eaw97_f_ex_stride_s produced.
 * The reconstructed signal is written to dst with the given stride.
 */
void dwt_eaw97_i_ex_stride_s(
    const float *src_l,
    const float *src_h,
    float *dst,
    float *tmp,
    int N,
    int stride,
    float *w
)
{
    assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride );
    // fix for small N: a single sample is pure low-pass, just unscale it
    if(N < 2)
    {
        if(1 == N)
            dst[0] = src_l[0] * dwt_cdf97_s2_s;
        return;
    }
    // copy src into tmp: even slots <- low-pass, odd slots <- high-pass
    dwt_util_memcpy_stride_s(tmp+0, 2*sizeof(float), src_l, stride, ceil_div2(N));
    dwt_util_memcpy_stride_s(tmp+1, 2*sizeof(float), src_h, stride, floor_div2(N));
    // inverse scale (mirrors the forward scaling with swapped factors)
    for(int i=0; i<N; i+=2)
        tmp[i] = tmp[i] * dwt_cdf97_s2_s;
    for(int i=1; i<N; i+=2)
        tmp[i] = tmp[i] * dwt_cdf97_s1_s;
    // backward update 2 + backward predict 2
    // interior even samples: undo the update step
    for(int i=2; i<N-(N&1); i+=2)
    {
        float wL = w[i-1];
        float wR = w[i+0];
        tmp[i] -= ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR) * (2.f*dwt_cdf97_u2_s);
    }
    // left boundary: sample 0 mirrors its single right neighbour
    {
        float wL = w[0];
        float wR = w[0];
        tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / (wL+wR) * (2.f*dwt_cdf97_u2_s);
    }
    // right boundary: mirrored neighbour tmp[N-2]; step kind depends on parity
    if( is_odd(N) )
    {
        float wL = w[N-2];
        float wR = w[N-2];
        tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_u2_s);
    }
    else
    {
        float wL = w[N-2];
        float wR = w[N-2];
        tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_p2_s);
    }
    // odd samples: undo the prediction step
    for(int i=1; i<N-2+(N&1); i+=2)
    {
        float wL = w[i-1];
        float wR = w[i+0];
        tmp[i] += ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR) * (2.f*dwt_cdf97_p2_s);
    }
    // backward update 1 + backward predict 1 (same structure, first pair)
    for(int i=2; i<N-(N&1); i+=2)
    {
        float wL = w[i-1];
        float wR = w[i+0];
        tmp[i] -= ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR) * (2.f*dwt_cdf97_u1_s);
    }
    {
        float wL = w[0];
        float wR = w[0];
        tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / (wL+wR) * (2.f*dwt_cdf97_u1_s);
    }
    if( is_odd(N) )
    {
        float wL = w[N-2];
        float wR = w[N-2];
        tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_u1_s);
    }
    else
    {
        float wL = w[N-2];
        float wR = w[N-2];
        tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_p1_s);
    }
    for(int i=1; i<N-2+(N&1); i+=2)
    {
        float wL = w[i-1];
        float wR = w[i+0];
        tmp[i] += ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR) * (2.f*dwt_cdf97_p1_s);
    }
    // copy tmp into dst
    dwt_util_memcpy_stride_s(dst, stride, tmp, sizeof(float), N);
}
/**
 * Multi-level 2-D forward EAW 9/7 transform, in place: each level first
 * transforms every row, then every column, halving the active subband.
 * *j_max_ptr is clamped to the maximum depth the image admits and, on
 * return, holds the number of levels actually performed.
 *
 * For every level j this allocates wH[j] (row weights) and wV[j] (column
 * weights) via dwt_util_alloc and fills them during the forward passes;
 * the caller keeps ownership of those arrays -- the inverse transform
 * (dwt_eaw97_2i_s) consumes them.
 */
void dwt_eaw97_2f_s(
    void *ptr,
    int stride_x,
    int stride_y,
    int size_o_big_x,
    int size_o_big_y,
    int size_i_big_x,
    int size_i_big_y,
    int *j_max_ptr,
    int decompose_one,
    int zero_padding,
    float *wH[],
    float *wV[],
    float alpha
)
{
    const int size_o_big_min = min(size_o_big_x,size_o_big_y);
    const int size_o_big_max = max(size_o_big_x,size_o_big_y);
    // scratch row/column buffer; private(temp) below gives each thread a copy
    float temp[size_o_big_max];
    // NOTE(review): temp is a VLA and can never compare equal to NULL --
    // this check is dead code, presumably left over from a heap-allocated
    // version of the buffer
    if(NULL == temp)
        abort();
    int j = 0;
    const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min);
    // clamp the requested depth to what the image size allows
    if( *j_max_ptr < 0 || *j_max_ptr > j_limit )
        *j_max_ptr = j_limit;
    for(;;)
    {
        if( *j_max_ptr == j )
            break;
        // dwt_util_log(LOG_DBG, "FWD-EAW-5/3: j = %i with wH[%i] wV[%i]\n", j, j, j);
        // active subband extents at this level (src) and the next (dst)
        const int size_o_src_x = ceil_div_pow2(size_o_big_x, j  );
        const int size_o_src_y = ceil_div_pow2(size_o_big_y, j  );
        const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1);
        const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1);
        const int size_i_src_x = ceil_div_pow2(size_i_big_x, j  );
        const int size_i_src_y = ceil_div_pow2(size_i_big_y, j  );
        // per-level weight storage, filled by the strided 1-D transforms
        wH[j] = dwt_util_alloc(size_o_src_y * size_i_src_x, sizeof(float));
        wV[j] = dwt_util_alloc(size_o_src_x * size_i_src_y, sizeof(float));
        // NOTE(review): omp_get_num_threads() is evaluated outside a
        // parallel region here, where it returns 1, so the static chunk
        // equals the whole range -- likely not the intended schedule; confirm
        #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads()))
        for(int y = 0; y < size_o_src_y; y++)
            dwt_eaw97_f_ex_stride_s(
                addr2_s(ptr,y,0,stride_x,stride_y),
                addr2_s(ptr,y,0,stride_x,stride_y),
                addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y),
                temp,
                size_i_src_x, // N
                stride_y,
                &wH[j][y*size_i_src_x],
                alpha
            );
        #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads()))
        for(int x = 0; x < size_o_src_x; x++)
            dwt_eaw97_f_ex_stride_s(
                addr2_s(ptr,0,x,stride_x,stride_y),
                addr2_s(ptr,0,x,stride_x,stride_y),
                addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y),
                temp,
                size_i_src_y, // N
                stride_x,
                &wV[j][x*size_i_src_y],
                alpha
            );
        if(zero_padding)
        {
            // zero the region between the inner (image) and outer sizes
            #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads()))
            for(int y = 0; y < size_o_src_y; y++)
                dwt_zero_padding_f_stride_s(
                    addr2_s(ptr,y,0,stride_x,stride_y),
                    addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y),
                    size_i_src_x,
                    size_o_dst_x,
                    size_o_src_x-size_o_dst_x,
                    stride_y);
            #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads()))
            for(int x = 0; x < size_o_src_x; x++)
                dwt_zero_padding_f_stride_s(
                    addr2_s(ptr,0,x,stride_x,stride_y),
                    addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y),
                    size_i_src_y,
                    size_o_dst_y,
                    size_o_src_y-size_o_dst_y,
                    stride_x);
        }
        j++;
    }
}
/**
 * Multi-level 2-D inverse EAW 9/7 transform, in place: undoes up to j_max
 * levels (clamped to the depth the image admits), columns first then rows
 * at each level -- the reverse of dwt_eaw97_2f_s.
 *
 * wH[j-1] / wV[j-1] must be the weight arrays produced by the forward
 * transform at level j-1; they are read here but NOT freed (the caller
 * keeps ownership).
 */
void dwt_eaw97_2i_s(
    void *ptr,
    int stride_x,
    int stride_y,
    int size_o_big_x,
    int size_o_big_y,
    int size_i_big_x,
    int size_i_big_y,
    int j_max,
    int decompose_one,
    int zero_padding,
    float *wH[],
    float *wV[]
)
{
    const int size_o_big_min = min(size_o_big_x,size_o_big_y);
    const int size_o_big_max = max(size_o_big_x,size_o_big_y);
    // scratch row/column buffer; private(temp) below gives each thread a copy
    float temp[size_o_big_max];
    // NOTE(review): temp is a VLA and can never be NULL -- dead check,
    // presumably left over from a heap-allocated version
    if(NULL == temp)
        abort();
    int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min);
    if( j_max >= 0 && j_max < j )
        j = j_max;
    for(;;)
    {
        if(0 == j)
            break;
        // dwt_util_log(LOG_DBG, "INV-EAW-5/3: j = %i with wH[%i] wV[%i]\n", j, j-1, j-1);
        // subband extents at this level (src) and the reconstructed one (dst)
        const int size_o_src_x = ceil_div_pow2(size_o_big_x, j  );
        const int size_o_src_y = ceil_div_pow2(size_o_big_y, j  );
        const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1);
        const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1);
        const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1);
        const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1);
        // NOTE(review): omp_get_num_threads() returns 1 outside a parallel
        // region, so this static chunk spans the whole range; confirm
        #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads()))
        for(int x = 0; x < size_o_dst_x; x++)
            dwt_eaw97_i_ex_stride_s(
                addr2_s(ptr,0,x,stride_x,stride_y),
                addr2_s(ptr,size_o_src_y,x,stride_x,stride_y),
                addr2_s(ptr,0,x,stride_x,stride_y),
                temp,
                size_i_dst_y, // N
                stride_x,
                &wV[j-1][x*size_i_dst_y]
            );
        #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads()))
        for(int y = 0; y < size_o_dst_y; y++)
            dwt_eaw97_i_ex_stride_s(
                addr2_s(ptr,y,0,stride_x,stride_y),
                addr2_s(ptr,y,size_o_src_x,stride_x,stride_y),
                addr2_s(ptr,y,0,stride_x,stride_y),
                temp,
                size_i_dst_x, // N
                stride_y,
                &wH[j-1][y*size_i_dst_x]
            );
        if(zero_padding)
        {
            // zero the region between the inner (image) and outer sizes
            #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads()))
            for(int y = 0; y < size_o_dst_y; y++)
                dwt_zero_padding_i_stride_s(
                    addr2_s(ptr,y,0,stride_x,stride_y),
                    size_i_dst_x,
                    size_o_dst_x,
                    stride_y);
            #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads()))
            for(int x = 0; x < size_o_dst_x; x++)
                dwt_zero_padding_i_stride_s(
                    addr2_s(ptr,0,x,stride_x,stride_y),
                    size_i_dst_y,
                    size_o_dst_y,
                    stride_x);
        }
        j--;
    }
}
|
solucao_omp_barrier.c | /******************************************************************************
* FILE: omp_bug3.c
* DESCRIPTION:
* Run time error
* AUTHOR: Blaise Barney 01/09/04
* LAST REVISED: 06/28/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
/* Demo: two OpenMP sections computed by (at most) two threads of the team.
 * a and b are shared read-only; c, i, tid and section are private so each
 * thread works on its own copy.  Barriers only serialize the banner output;
 * the actual result printing is serialized inside print_results via a
 * critical section (the fix over the original omp_bug3.c, which called a
 * barrier from inside the sections and deadlocked). */
int main (int argc, char *argv[])
{
    int i, nthreads, tid, section;
    float a[N], b[N], c[N];
    void print_results(float array[N], int tid, int section);
    /* Some initializations */
    for (i=0; i<N; i++)
        a[i] = b[i] = i * 1.0;
    #pragma omp parallel private(c,i,tid,section)
    {
        tid = omp_get_thread_num();
        /* nthreads is shared but only thread 0 writes and reads it */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        /*** Use barriers for clean output ***/
        #pragma omp barrier
        printf("Thread %d starting...\n",tid);
        #pragma omp barrier
        /* nowait: threads that get no section (teams larger than 2) fall
         * through to the explicit barrier below instead of waiting here */
        #pragma omp sections nowait
        {
            #pragma omp section
            {
                section = 1;
                for (i=0; i<N; i++)
                    c[i] = a[i] * b[i];
                print_results(c, tid, section);
            }
            #pragma omp section
            {
                section = 2;
                for (i=0; i<N; i++)
                    c[i] = a[i] + b[i];
                print_results(c, tid, section);
            }
        } /* end of sections */
        /*** Use barrier for clean output ***/
        #pragma omp barrier
        printf("Thread %d exiting...\n",tid);
    } /* end of parallel section */
    /* no explicit return: C99 main implicitly returns 0 */
}
/* Print the N results computed by one section, five values per line.
 * The whole report sits in a critical section so concurrent threads cannot
 * interleave their output. */
void print_results(float array[N], int tid, int section)
{
    #pragma omp critical
    {
        int col = 1;   /* 1-based column counter; wraps after 5 values */
        printf("\nThread %d did section %d. The results are:\n", tid, section);
        for (int idx = 0; idx < N; idx++) {
            printf("%e ",array[idx]);
            col++;
            if (col == 6) {
                printf("\n");
                col = 1;
            }
        }
        printf("\n");
    } /* end of critical */
}
|
scheduler-clause.c | /*
* sheduler-clause.c
*
* Created on: 28/04/2014
* Author: Carlos de la Torre
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
    // n is 20 when the VLA a[n] is sized; the atoi() result below is clamped
    // to 20, so all indexing stays within the original allocation
    int i, n = 20, a[n], suma = 0;
    if (argc < 2) {
        fprintf(stderr, "\nFalta iteraciones \n");
        exit(-1);
    }
    n = atoi(argv[1]);
    if (n > 20)
        n = 20;
    for (i = 0; i < n; i++)
        a[i] = i;
    // firstprivate: each thread's private suma starts at the current value (0).
    // lastprivate: after the loop, suma holds the private copy of the thread
    // that ran the sequentially-last iteration -- i.e. only that thread's
    // partial sum, NOT a full reduction.  That asymmetry (which varies with
    // the runtime schedule, OMP_SCHEDULE) is the point of this demo.
    #pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(runtime)
    for (i = 0; i < n; i++) {
        suma = suma + a[i];
        printf(" thread %d suma a[%d]=%d suma=%d \n", omp_get_thread_num(), i, a[i], suma);
    }
    printf("Fuera de 'parallel for' suma=%d\n", suma);
    return 0;
}
|
mandelbrot_set.c | /**
* Copyright (c) 2020 Eros Fabrici - eros.fabrici@gmail.com
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>
#include <mpi.h>
#include <string.h>
#include <stdbool.h>
#include <sys/syscall.h>
//mpi tags
#define _GNU_SOURCE
#define DATA 0
#define RESULT 1
#define TERMINATE 2
#define REMAINDER 3
#define MASTER 0
//fixed height of the chunks to be distributed to the slaves
//the dim of a chunk is N_x * FIXED_HEIGHT
#define FIXED_HEIGHT 20
//THREADS load balancing times
#ifdef _OPENMP
#define LOAD_BALANCE
#endif
// I_max: per-pixel iteration cap (also the PGM maxval)
unsigned short I_max, header_size; //header_size = size of the pgm header
//job_size = N_x * FIXED_HEIGHT or (N_x * job_remainder) = job_remainder_size
//where job_remainder = N_y % FIXED_SIZE
unsigned int N_x, N_y, job_size, job_height, job_remainder, job_remainder_size;
int pid, world_size, nthreads;    // MPI rank, communicator size, OMP threads
// complex-plane window [x_L,x_R] x [y_L,y_R] and the per-pixel steps
double x_L, x_R, y_L, y_R;
double d_x, d_y;
char* header;                     // PGM header text (malloc'd in init_MPI_env)
double** timer_threads;           // per-process/per-thread timers (LOAD_BALANCE)
double starting_time;
MPI_File output_file;
MPI_Status file_stat;
// minimal complex number used by the escape-time iteration
struct complex
{
    double real;
    double imag;
};
// forward declarations
void init_env(int argc, char** argv);
void init_MPI_env(int argc, char** argv);
void master();
void slave();
void compute_mandelbrot(unsigned int row_offset, unsigned int work_amount, unsigned short* buffer);
unsigned short compute_pixel(struct complex c, unsigned short max_iter);
unsigned short compute_pixel(struct complex c, unsigned short max_iter);
/* Entry point: parse arguments, bring up MPI (and OpenMP bookkeeping),
 * run the master/slave computation, then report per-process wall time. */
int main(int argc, char** argv) {
    init_env(argc, argv);
    init_MPI_env(argc, argv);
#ifdef _OPENMP
    // one thread records the team size for the banners below
    #pragma omp parallel
    #pragma omp single
    nthreads = omp_get_num_threads();
    if(pid == MASTER)
    {
        if (world_size == 1)
            printf("\n\nOMP EXECUTION WITH %d threads\n", nthreads);
        else
            printf("\n\nMPI+OMP EXECUTION WITH %d processes and %d threads\n", world_size, nthreads);
    }
    #ifdef LOAD_BALANCE
    // NOTE(review): each process allocates world_size rows but only ever
    // touches timer_threads[pid]; also the rows come from malloc and are
    // later incremented with += without being zeroed first -- calloc would
    // be safer; confirm
    timer_threads = (double**) malloc(sizeof(double*) * world_size);
    {
        int i;
        for (i = 0; i < world_size; i++)
        {
            *(timer_threads + i) = (double*) malloc(sizeof(double) * nthreads);
        }
    }
    #endif
    starting_time = omp_get_wtime();
#else
    if (pid==MASTER) printf("\n\nMPI EXECUTION with %d processes\n", world_size);
    starting_time = MPI_Wtime();
#endif
    // rank 0 coordinates, everyone else computes
    pid == MASTER ? master() : slave();
    MPI_File_close(&output_file);
#if defined(_OPENMP) && defined(LOAD_BALANCE)
    printf("Process %d; WTime: %f\n", pid, omp_get_wtime() - starting_time);
    {
        int i;
        for (i = 0; i < nthreads; i++)
        {
            printf("PID: %d thread %d time %f\n", pid, i, timer_threads[pid][i]);
        }
    }
#else
    printf("Process %d; WTime: %f\n", pid, MPI_Wtime() - starting_time);
#endif
    MPI_Finalize();
    return 0;
}
/**
 * Initialise environment: parse the command line into the global problem
 * description.  Expected arguments: N_x N_y x_L y_L x_R y_R I_max.
 *
 * Fix: the previous version indexed argv[1..7] unconditionally, which is
 * undefined behavior when fewer arguments are supplied; a usage message is
 * now printed instead.
 */
void init_env(int argc, char** argv)
{
    // need program name + 7 arguments
    if (argc < 8)
    {
        fprintf(stderr, "usage: %s N_x N_y x_L y_L x_R y_R I_max\n", argv[0]);
        exit(1);
    }
    // resolution of the pgm file
    N_x = atoi(argv[1]), N_y = atoi(argv[2]);
    if(N_y < FIXED_HEIGHT)
    {
        printf("N_y must be at least %d\nexiting..", FIXED_HEIGHT);
        exit(0);
    }
    // area of the complex plane: [x_L,x_R] x [y_L,y_R]
    x_L = atof(argv[3]), y_L = atof(argv[4]);
    x_R = atof(argv[5]), y_R = atof(argv[6]);
    // maximum iterations per pixel
    I_max = atoi(argv[7]);
    // per-pixel steps across the window
    d_x = (x_R - x_L) / N_x;
    d_y = (y_R - y_L) / N_y;
}
/**
 * Initialise MPI environment: start MPI (requesting MPI_THREAD_FUNNELED when
 * OpenMP is enabled, since only the master thread makes MPI calls), build the
 * PGM header string, and derive the job-chunking globals.
 */
void init_MPI_env(int argc, char** argv)
{
#ifdef _OPENMP
    int mpi_provided_thread_level;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED,
        &mpi_provided_thread_level);
    if (mpi_provided_thread_level < MPI_THREAD_FUNNELED)
    {
        printf("a problem arise when asking for MPI_THREAD_FUNNELED level\n");
        MPI_Finalize();
        exit( 1 );
    }
#else
    MPI_Init(&argc, &argv);
#endif
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);
    //header for the pgm file
    // 50 bytes is ample: "P5\n" + two 10-digit ints + space + 5-digit maxval
    header = (char*) malloc(50 * sizeof(char));
    sprintf(header, "P5\n%d %d\n%d\n", N_x, N_y, I_max);
    header_size = strlen(header);
    // chunked distribution only makes sense with at least 2 slaves;
    // otherwise one job covers the whole image
    bool check = world_size > 2;
    job_height = check ? FIXED_HEIGHT : N_y;
    if (check)
    {
        job_remainder = (N_y % job_height);
        job_remainder_size = job_remainder * N_x;
    }
    job_size = job_height * N_x;
}
/**
 * Master process: writes the PGM header, hands out row chunks of job_height
 * rows to the slaves, and re-feeds each slave a new chunk as soon as it
 * reports completion; a TERMINATE tag releases a slave once all rows have
 * been assigned.  With world_size == 1 there are no slaves and the whole
 * image is computed locally.
 */
void master()
{
    MPI_Status stat, file_stat;   // note: local file_stat shadows the global
    //open the file only in the master and write the header
    MPI_File_open(MPI_COMM_SELF, "mandelbrot_set_parallel.pgm",
        MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &output_file);
    MPI_File_write(output_file, header, header_size, MPI_CHAR, &file_stat);
    MPI_File_close(&output_file);
    //collective open
    MPI_File_open(MPI_COMM_WORLD, "mandelbrot_set_parallel.pgm",
        MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &output_file);
    if (world_size == 1)
    {
        // no slaves: compute everything in this process
        unsigned short* buffer = (unsigned short*) malloc(N_y * N_x * sizeof(unsigned short));
        if (buffer == NULL)
            return;   // out of memory: skip the computation
        compute_mandelbrot(MASTER, N_y, buffer);
        free(buffer);
        return;
    }
    unsigned short tag, working_slaves = 1;
    unsigned int row_offset = 0;
    //distribute the first world_size-1 jobs
    //BUG FIX: row_offset is a ROW offset and must be bounded by N_y (the
    //row count); the original compared it against N_x (the column count)
    for (; working_slaves < world_size && row_offset < N_y; working_slaves++, row_offset += job_height)
    {
        tag = (row_offset + job_height > N_y) ? REMAINDER : DATA;
        MPI_Send(&row_offset, 1, MPI_UNSIGNED, working_slaves, tag, MPI_COMM_WORLD);
    }
    //loop in which we assign the remaining jobs to the slaves
    //as they finish the previous assigned until there are no more
    //jobs to be assigned
    do
    {
        short done;
        MPI_Recv(&done, 1, MPI_SHORT, MPI_ANY_SOURCE,
            MPI_ANY_TAG, MPI_COMM_WORLD, &stat);
        int slave = stat.MPI_SOURCE;
        working_slaves--;
        //if there is still work to be made
        if (row_offset < N_y)
        {
            //if check == true, we are issuing the last job which will have a different
            //size because N_y is not divisible by job_height (i.e. 0 < job_remainder < 20)
            bool check = row_offset + job_height > N_y;
            tag = check ? REMAINDER : DATA;
            MPI_Send(&row_offset, 1, MPI_UNSIGNED, slave, tag, MPI_COMM_WORLD);
            row_offset += check ? job_remainder : job_height;
            working_slaves++;
        }
        else
        {
            //otherwise we let the slave terminate
            MPI_Send(&row_offset, 1, MPI_UNSIGNED, slave, TERMINATE, MPI_COMM_WORLD);
        }
    } while (working_slaves > 1);
}
/**
 * Slave process loop: receive a row offset from the master, compute and
 * write that slice, send an ACK back (echoing the received tag), and wait
 * for the next assignment.  Terminates on a message tagged TERMINATE.
 */
void slave()
{
    MPI_Status stat;
    unsigned int row_offset;
    // scratch for the largest possible job (job_height rows)
    unsigned short* buffer = (unsigned short*) malloc(job_height * N_x * sizeof(unsigned short));
    // collective open, matching the master's MPI_COMM_WORLD open
    MPI_File_open(MPI_COMM_WORLD, "mandelbrot_set_parallel.pgm",
        MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &output_file);
    MPI_Recv(&row_offset, 1, MPI_UNSIGNED, MASTER, MPI_ANY_TAG, MPI_COMM_WORLD, &stat);
    while (stat.MPI_TAG != TERMINATE)
    {
        // REMAINDER jobs are shorter than the regular job_height
        unsigned int temp_job_height = stat.MPI_TAG == DATA ? job_height : job_remainder;
        compute_mandelbrot(row_offset, temp_job_height, buffer);
        short done = 1;
        MPI_Send(&done, 1, MPI_SHORT, MASTER, stat.MPI_TAG, MPI_COMM_WORLD);
        MPI_Recv(&row_offset, 1, MPI_UNSIGNED, MASTER, MPI_ANY_TAG, MPI_COMM_WORLD, &stat);
    }
    free(buffer);
}
/**
 * Compute one horizontal slice of the Mandelbrot image and write it to the
 * shared output file at the slice's byte offset.
 *
 * row_offset  : first image row of the slice
 * work_amount : number of rows to compute
 * buffer      : caller-provided scratch of at least work_amount * N_x pixels
 */
void compute_mandelbrot(unsigned int row_offset, unsigned int work_amount, unsigned short* buffer)
{
    struct complex c;
    unsigned int i, j;
#ifdef _OPENMP
    // collapse(2) flattens the row/column loops; i and j are implicitly
    // private as collapsed loop variables, c is explicitly private
    #pragma omp parallel for schedule(dynamic, 10) private(c) collapse(2)
#endif
    for(i = 0; i < work_amount; i++) {
        for(j = 0; j < N_x; j++) {
#if defined(_OPENMP) && defined(LOAD_BALANCE)
            double s = omp_get_wtime();
#endif
            // map pixel (row_offset+i, j) to a point of the complex plane
            c.imag = y_L + (i + row_offset) * d_y;
            c.real = x_L + j * d_x;
            *(buffer + (i * N_x + j)) = compute_pixel(c, I_max);
#if defined(_OPENMP) && defined(LOAD_BALANCE)
            // each thread accumulates into its own slot: no race
            timer_threads[pid][omp_get_thread_num()] += omp_get_wtime() - s;
#endif
        }
    }
    //write the results using offset
    // NOTE(review): pixels are written in native byte order, while 16-bit
    // PGM (maxval > 255) is specified as big-endian -- confirm on the target
    MPI_File_write_at(output_file,
        header_size * sizeof(char) + row_offset * N_x * sizeof(unsigned short),
        buffer, N_x * work_amount, MPI_UNSIGNED_SHORT, &file_stat);
}
/**
 * Function that, given a complex number c and an integer bound,
 * determines whether c belongs to the Mandelbrot set and returns
 * the iteration counter used in the loop.
 */
unsigned short compute_pixel(struct complex c, unsigned short max_iter)
{
unsigned short count = 0;
struct complex z;
z.real = 0.0; z.imag = 0.0;
double temp;
do
{
temp = z.real * z.real - z.imag * z.imag + c.real;
z.imag = 2 * z.real * z.imag + c.imag;
z.real = temp;
} while ((z.real * z.real + z.imag * z.imag < 4.0) && (count++ < max_iter));
return count;
}
//----------------------------------------------------------------------------------------
//FUNCTIONS TO GET CPU ID - simply used to check that the threads were not pinned to a
//single core
/* Returns the id of the CPU the calling thread is currently running on,
   or -1 on failure.  Three fallbacks, chosen at compile time. */
int get_cpu_id( void )
{
#if defined(_GNU_SOURCE) // GNU SOURCE ------------
  /* glibc wrapper around the getcpu system call. */
  return sched_getcpu( );
#else
#ifdef SYS_getcpu // direct sys call ---
  int cpuid;
  if ( syscall( SYS_getcpu, &cpuid, NULL, NULL ) == -1 )
  return -1;
  else
  return cpuid;
#else
  /* Last resort: parse the cpu field out of /proc/self/stat.
     NOTE(review): &val is unsigned* but read_proc__self_stat takes int* --
     works in practice for non-negative cpu ids, but worth fixing. */
  unsigned val;
  if ( read_proc__self_stat( CPU_ID_ENTRY_IN_PROCSTAT, &val ) == -1 )
  return -1;
  return (int)val;
#endif // -----------------------
#endif
}
/*
  Reads /proc/self/stat and stores the field-th space-separated entry,
  parsed as an integer, into *ret_val.  field counts tokens starting at 1
  (field == 1 yields the pid).  Returns 0 on success, -1 on failure
  (unreadable file, field < 1, or fewer tokens than requested).

  Other interesting fields (0-indexed):
    pid : 0
    father : 1
    utime : 13
    cutime : 14
    nthreads : 18
    rss : 22
    cpuid : 39
  read man /proc page for fully detailed infos
*/
int read_proc__self_stat( int field, int *ret_val )
{
  *ret_val = 0;
  /* The original post-decrement loop spun forever for field <= 1. */
  if ( field < 1 )
    return -1;
  FILE *file = fopen( "/proc/self/stat", "r" );
  if ( file == NULL )
    return -1;
  char *line = NULL;
  size_t len = 0;            /* must be initialized before getline() */
  ssize_t ret = getline( &line, &len, file );
  fclose(file);
  if ( ret == -1 )
    {
      free(line);            /* getline may allocate even on failure */
      return -1;
    }
  char *savetoken = line;
  char *token = strtok_r( line, " ", &savetoken );
  /* Advance to the requested field; stop early (instead of passing NULL
     to atoi) if the line has fewer tokens than expected. */
  for ( int remaining = field - 1; remaining > 0 && token != NULL; remaining-- )
    token = strtok_r( NULL, " ", &savetoken );
  if ( token == NULL )
    {
      free(line);
      return -1;
    }
  *ret_val = atoi(token);
  free(line);
  return 0;
}
|
7786.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
/* Fill the four input matrices with the benchmark's deterministic pattern.
   The divisors deliberately mix the different problem sizes (ni, nj, nl,
   nk), exactly as in the reference PolyBench kernel. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  int row, col;
  /* A is ni x nk. */
  for (row = 0; row < ni; row++)
    for (col = 0; col < nk; col++)
      A[row][col] = ((DATA_TYPE) row*col) / ni;
  /* B is nk x nj. */
  for (row = 0; row < nk; row++)
    for (col = 0; col < nj; col++)
      B[row][col] = ((DATA_TYPE) row*(col+1)) / nj;
  /* C is nj x nm. */
  for (row = 0; row < nj; row++)
    for (col = 0; col < nm; col++)
      C[row][col] = ((DATA_TYPE) row*(col+3)) / nl;
  /* D is nm x nl. */
  for (row = 0; row < nm; row++)
    for (col = 0; col < nl; col++)
      D[row][col] = ((DATA_TYPE) row*(col+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump G to stderr so dead-code elimination cannot drop the computation;
   the output can also be diffed against a reference run. */
static
void print_array(int ni, int nl,
		 DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int row, col;
  for (row = 0; row < ni; row++)
    for (col = 0; col < nl; col++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[row][col]);
      /* Reference line-wrapping rule kept verbatim (every 20th value,
         computed with ni, as in the upstream benchmark). */
      if ((row * ni + col) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* G := (A*B) * (C*D), computed as E := A*B, F := C*D, G := E*F.
   Fixes relative to the original: the bare "#pragma omp" lines were
   no-ops; "schedule" is not a valid clause on a plain
   "target teams distribute"; and the function-scope k was shared across
   parallel j-iterations, a data race.  Each j-loop now uses the combined
   "target teams distribute parallel for" construct with k privatized. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
		DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j, k;
#pragma scop
  {
    /* E := A*B */
    for (i = 0; i < _PB_NI; i++)
      {
#pragma omp target teams distribute parallel for schedule(dynamic, 8) private(k)
	for (j = 0; j < _PB_NJ; j++)
	  {
	    E[i][j] = 0;
	    for (k = 0; k < _PB_NK; ++k)
	      E[i][j] += A[i][k] * B[k][j];
	  }
      }
    /* F := C*D */
    for (i = 0; i < _PB_NJ; i++)
      {
#pragma omp target teams distribute parallel for schedule(dynamic, 8) private(k)
	for (j = 0; j < _PB_NL; j++)
	  {
	    F[i][j] = 0;
	    for (k = 0; k < _PB_NM; ++k)
	      F[i][j] += C[i][k] * D[k][j];
	  }
      }
    /* G := E*F */
    for (i = 0; i < _PB_NI; i++)
      {
#pragma omp target teams distribute parallel for schedule(dynamic, 8) private(k)
	for (j = 0; j < _PB_NL; j++)
	  {
	    G[i][j] = 0;
	    for (k = 0; k < _PB_NJ; ++k)
	      G[i][j] += E[i][k] * F[k][j];
	  }
      }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  int nl = NL;
  int nm = NM;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
  POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
  POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
  POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
  POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
  /* Initialize array(s).  Only the inputs A-D are initialized here;
     E, F and G are produced by the kernel. */
  init_array (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_3mm (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(E),
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(F),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D),
	      POLYBENCH_ARRAY(G));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(E);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(F);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  POLYBENCH_FREE_ARRAY(G);
  return 0;
}
|
2.hello_firstprivate.c | #include <stdio.h>
#include <omp.h>
/* If the OMP_NUM_THREADS variable is set to 8 with */
/* export OMP_NUM_THREADS=8 */
/* Q1: Is the execution of the program correct? Add a */
/* data sharing clause to make it correct */
/* Q2: Are the lines always printed in the same order? */
/* Could the messages appear intermixed? */
int main ()
{
  /* firstprivate gives every thread its own copy of tid (initialized to
     -1); each thread then overwrites it with its own thread number, so
     both prints of a given thread show the same id.  The lines from
     different threads may still interleave. */
  int tid = -1;
#pragma omp parallel firstprivate(tid)
  {
    tid = omp_get_thread_num();
    printf("(%d) Hello ", tid);
    printf("(%d) world!\n", tid);
  }
  return 0;
}
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Count the pixel channels flagged for update; report at least one so
   callers can safely divide by the result. */
static size_t GetImageChannels(const Image *image)
{
  size_t
    count = 0;

  for (ssize_t idx = 0; idx < (ssize_t) GetPixelChannels(image); idx++)
  {
    PixelChannel channel = GetPixelChannelChannel(image, idx);
    PixelTrait traits = GetPixelChannelTraits(image, channel);
    if ((traits & UpdatePixelTrait) != 0)
      count++;
  }
  return count == 0 ? (size_t) 1 : count;
}
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;
  const char
    *artifact;
  double
    fuzz;
  Image
    *clone_image,
    *difference_image,
    *highlight_image;
  MagickBooleanType
    status;
  PixelInfo
    highlight,
    lowlight,
    masklight;
  RectangleInfo
    geometry;
  size_t
    columns,
    rows;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Compute the requested metric first; bail out before building any
     difference imagery if it fails. */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /* The difference image spans the larger of the two geometries. */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  /* The highlight image is painted pixel-by-pixel below, then composited
     onto the difference image. */
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /* Default colors, each overridable through an image artifact. */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register Quantum
      *magick_restrict r;
    register ssize_t
      x;
    /* A failure in any row makes the remaining rows no-ops ("break" is
       not allowed inside an OpenMP worksharing loop). */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      MagickStatusType
        difference;
      register ssize_t
        i;
      /* Pixels masked for reading on either image are painted with the
         dedicated masklight color. */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      /* Channel values are compared weighted by each pixel's alpha. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        /* One channel past the fuzz threshold marks the whole pixel. */
        if ((distance*distance) > fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  (void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Counts, per channel, the pixels whose alpha-weighted difference exceeds
  the images' fuzz factor; totals accumulate into distortion[].  Returns
  MagickFalse only when a pixel row could not be read.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  double
    fuzz;
  MagickBooleanType
    status;
  size_t
    columns,
    rows;
  ssize_t
    y;
  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      j,
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      MagickBooleanType
        difference;
      register ssize_t
        i;
      /*
        Consistency fix: skip pixels masked for reading on either image,
        as every other distortion metric in this file does (the original
        tested only GetPixelWriteMask(image,p)).
      */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        if ((distance*distance) > fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      /* The composite channel counts pixels differing in any channel. */
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Serialize the per-row totals into the shared accumulator. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  Root-mean-squared, alpha-weighted channel difference normalized to the
  quantum range.  Per-channel sums of squares are averaged over the
  visited pixel area; the composite channel is additionally averaged over
  the channel count and square-rooted.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  double
    area;
  MagickBooleanType
    status;
  register ssize_t
    j;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    /* Fix: q is const-qualified; the original compared against a
       non-const (Quantum *) NULL cast, unlike every sibling metric. */
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      /* Only unmasked pixels contribute to the normalization area. */
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal guards against a zero (fully masked) area. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  Mean absolute, alpha-weighted channel difference normalized to the
  quantum range; per-channel sums are averaged over the visited pixel
  area, and the composite channel is further averaged over the number of
  updated channels.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  double
    area;
  MagickBooleanType
    status;
  register ssize_t
    j;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /* area is a proper OpenMP reduction; the distortion totals are instead
     merged row-by-row inside the named critical section below. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      /* Skip pixels masked for reading on either image. */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      /* Only unmasked pixels contribute to the normalization area. */
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal guards against a zero (fully masked) area. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  Accumulates the per-channel absolute differences into distortion[] and
  records mean/normalized/maximum error statistics into image->error.
  Serial by design.  Fix: the final averages now multiply by
  PerceptibleReciprocal(area) instead of dividing by area, so a zero area
  (fully masked or unreadable images) can no longer divide by zero --
  consistent with the other metrics in this file.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  MagickBooleanType
    status;
  double
    area,
    maximum_error,
    mean_error;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      /* Skip pixels masked for reading on either image. */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        /* area counts channel samples here, not pixels. */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Guard against division by a zero sample count. */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=distortion[CompositePixelChannel]*area;
  image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error*area;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  Mean squared, alpha-weighted channel difference normalized to the
  quantum range.  Identical accumulation to GetFuzzDistortion, but the
  composite result is left as MSE (no final square root).
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  double
    area;
  MagickBooleanType
    status;
  register ssize_t
    j;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /* area is a proper OpenMP reduction; the distortion totals are merged
     row-by-row inside the named critical section below. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      /* Skip pixels masked for reading on either image. */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      /* Only unmasked pixels contribute to the normalization area. */
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal guards against a zero (fully masked) area. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion(): compute the normalized
  cross-correlation (NCC) between `image` and `reconstruct_image` per
  channel.  Per-channel means and standard deviations (from
  GetImageStatistics) make the measure insensitive to overall lighting and
  exposure differences.  Results land in distortion[0..MaxPixelChannels];
  the composite entry holds the RMS over the channels.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* One of the statistics allocations failed; release whichever
         succeeded before bailing out. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  /* Compare over the union of both geometries; virtual pixels supply data
     where one image is smaller. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    First pass: count the pixels that participate (not masked out of either
    image) so the accumulated products can be averaged per pixel.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);  /* area now holds 1/count, safely */
  /*
    Second pass: accumulate the mean-subtracted, alpha-weighted product of
    corresponding samples (the covariance numerator of the NCC).
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Images without an alpha trait are treated as fully opaque. */
      Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ?
        GetPixelAlpha(image,p) : OpaqueAlpha);
      Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ?
        GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* The alpha channel itself is not alpha-weighted. */
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* This loop is serial, so the progress counter needs no locking. */
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel=GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);  /* guards against sigma ~ 0 */
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  /* Composite channel reports the RMS over the per-channel values. */
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion(): record, per channel, the largest absolute
  difference (scaled to [0,1] by QuantumScale) between corresponding pixels
  of `image` and `reconstruct_image`.  Rows are processed in parallel; each
  thread keeps private maxima that are merged under a critical section.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Compare over the union of both geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* thread-private maxima */

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* Pixels masked out of either image are skipped entirely. */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Alpha-weight the samples before differencing. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;  /* channel not comparable in both images */
        distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's maxima into the shared result array. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio(): convert the per-channel mean squared error
  into PSNR (decibels).  Channels whose MSE is effectively zero are reported
  as INFINITY, i.e. a perfect match.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    if (fabs(distortion[i]) >= MagickEpsilon)
      {
        distortion[i]=20.0*MagickLog10(1.0/sqrt(distortion[i]));
        continue;
      }
    distortion[i]=INFINITY;
  }
  return(status);
}
/*
  GetPerceptualHashDistortion(): compare the perceptual hashes (image
  moments across colorspaces) of the two images.  For each channel the
  (squared, or normalized when the "phash:normalize" artifact is true)
  moment differences are accumulated; the composite entry receives the sum
  over all channels.  Returns MagickFalse when either hash cannot be
  computed.

  Fix: the normalize branch previously assigned (`=`) instead of
  accumulating (`+=`), so only the last colorspace/moment term survived.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    register ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      register ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          difference+=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    distortion[channel]+=difference;  /* one writer per channel: no race */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;  /* shared: serialize */
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion(): compute the MSE first, then take the
  square root of every channel entry (including the composite slot at
  MaxPixelChannels) to obtain the RMSE.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  i=0;
  while (i <= MaxPixelChannels)
  {
    distortion[i]=sqrt(distortion[i]);
    i++;
  }
  return(status);
}
/*
  GetImageDistortion(): dispatch to the metric-specific comparison routine,
  return the composite (all-channel) distortion in *distortion, and record
  it as the image's "distortion" property.  An unrecognized metric falls
  through to normalized cross-correlation.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1;  /* one slot per channel plus the composite */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  /* Report the composite channel as the scalar result. */
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions(): compute the requested distortion metric for every
  channel and return a newly allocated array of MaxPixelChannels+1 doubles
  (caller releases it with RelinquishMagickMemory).  Returns NULL when the
  underlying comparison fails.

  Fix: the PerceptualHashErrorMetric case erroneously dispatched to
  GetRootMeanSquaredDistortion (copy/paste from the case below); it now
  calls GetPerceptualHashDistortion, matching GetImageDistortion().
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;  /* per-channel slots plus the composite */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      /* Comparison failed: release the buffer rather than return garbage. */
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compare the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual(): return MagickTrue only when every comparable channel of
  every pixel matches to within MagickEpsilon.  Scanning stops at the first
  mismatching pixel (detected via the loop-index comparisons below).
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /* Compare over the union of both geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;  /* pixel fetch failed: treated as a mismatch */
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* Masked pixels are treated as equal. */
      if (GetPixelWriteMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;  /* channel not comparable in both images */
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          break;  /* channel differs */
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;  /* inner loop exited early: pixels differ */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;  /* propagate the mismatch outward */
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Equal only if every row was scanned to completion. */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A return value other than 0 (MagickTrue) means the
% colors of the two images match exactly. Otherwise an error measure is
% computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColorMetric(): accumulate per-channel color differences between
  `image` and `reconstruct_image` and store the mean / normalized-mean /
  normalized-maximum error in image->error.  Returns MagickTrue only when
  the images match exactly (mean error per pixel is 0.0).
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* Masked pixels do not contribute to any of the error terms. */
      if (GetPixelWriteMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        /* Differences below MagickEpsilon count as an exact match. */
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        /* NOTE(review): area counts channel samples (incremented once per
           comparable channel, not per pixel), and the divisions below
           assume area != 0 -- a fully-masked or zero-size comparison would
           divide by zero.  Confirm callers guarantee at least one sample. */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  /* Exact match iff no per-pixel error accumulated. */
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImageImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric(): crop the tile of `image` that lies under
  `reference` at (x_offset,y_offset) and measure its distortion against the
  reference with the requested metric.  Returns 0.0 when the crop or the
  comparison fails.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    measure;

  Image
    *tile;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /* Position a reference-sized window at the requested offset. */
  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  tile=CropImage(image,&geometry,exception);
  if (tile == (Image *) NULL)
    return(0.0);
  measure=0.0;
  status=GetImageDistortion(tile,reference,metric,&measure,exception);
  tile=DestroyImage(tile);
  return(status == MagickFalse ? 0.0 : measure);
}
/*
  SimilarityImage(): score every offset of `image` where `reference` fits
  and return a similarity map (white = exact match, black = no match).  The
  best-matching offset is returned in *offset and its score in
  *similarity_metric.  The scan short-circuits once a score at or below
  similarity_threshold is found.

  Fix: the OpenMP critical section bound only to the metric-conversion `if`
  that followed it, leaving the shared best-match bookkeeping
  (*similarity_metric, *offset) to race across threads; the critical now
  guards that read-modify-write.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /* One result pixel per candidate offset. */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;  /* a good-enough match was already found elsewhere */
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* NCC reports correlation (1.0 == identical); convert to a distance. */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      if (GetPixelWriteMask(similarity_image,q) == 0)
        {
          SetPixelBackgoundColor(similarity_image,q);
          q+=GetPixelChannels(similarity_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        /* Invert so a perfect match renders white. */
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#include "../examples/openmp_dot_product/scalar/runner.h"
/* Return a step of -1 with probability p, otherwise +1.  Uses rand_r so
   each caller-supplied seed yields an independent, reproducible stream. */
int getMove(double p, unsigned int* seed) {
    double u = rand_r(seed) / (double) RAND_MAX;  /* uniform draw in [0, 1] */
    if (u < p)
        return -1;
    return 1;
}
/* Shared parameters and results for the random-walk simulations. */
struct context {
    int a, b, x, N, P;    /* absorbing barriers a and b, start position x,
                             number of walks N, OpenMP thread count P */
    double p;             /* probability of a -1 step */
    int result, lifetime; /* signed outcome sum and total steps over all walks */
};
/*
 * Sequential gambler's-ruin simulation: run ctx->N independent random
 * walks starting at ctx->x, each stepping +/-1 (probability ctx->p of -1)
 * until it hits barrier ctx->a or ctx->b.  Accumulates the signed outcome
 * count in ctx->result (-1 per walk absorbed at a, +1 per walk at b) and
 * the total number of steps in ctx->lifetime.
 */
void not_parallel_walk(void* ctx_void) {
    struct context* ctx = (struct context*) ctx_void;
    int result = 0;
    int lifetime = 0;
    /* Seed from thread id + wall clock so repeated runs differ. */
    unsigned int seed = (unsigned int) omp_get_thread_num() + (unsigned int) time(NULL);
    printf("seed: %u\n", seed);
    for (int j = 0; j < ctx->N; ++j) {
        int S = ctx->x;  /* current position of this walk */
        for (int i = 0;; ++i) {
            S += getMove(ctx->p, &seed);
            //printf("%d ", S);
            if (S == ctx->a || S == ctx->b) {
                lifetime += i;  /* steps taken before absorption */
                if (S == ctx->a) {
                    result -= 1;
                } else if (S == ctx->b) {
                    result += 1;
                }
                break;
            }
        }
    }
    ctx->result = result;
    ctx->lifetime = lifetime;
}
/*
 * Parallel gambler's-ruin simulation: same contract as the sequential
 * version, but the ctx->N walks are divided among ctx->P OpenMP threads.
 * Each thread draws from its own rand_r seed; the per-thread result and
 * lifetime totals are combined with sum reductions before being stored
 * back into the context.
 */
void parallel_walk(void* ctx_void) {
    struct context* ctx = (struct context*) ctx_void;
    int result = 0;
    int lifetime = 0;
    #pragma omp parallel num_threads(ctx->P)
    {
        //omp_set_num_threads(ctx->P);
        /* Thread-private seed: thread id + wall clock. */
        unsigned int seed = (unsigned int) omp_get_thread_num() + (unsigned int) time(NULL);
        printf("seed: %u\n", seed);
        #pragma omp for reduction(+:result) reduction(+:lifetime)
        for (int j = 0; j < ctx->N; ++j) {
            int S = ctx->x;  /* current position of this walk */
            for (int i = 0;; ++i) {
                S += getMove(ctx->p, &seed);
                if (S == ctx->a || S == ctx->b) {
                    lifetime += i;  /* steps taken before absorption */
                    if (S == ctx->a) {
                        result -= 1;
                    } else if (S == ctx->b) {
                        result += 1;
                    }
                    break;
                }
            }
        }
    }
    ctx->result = result;
    ctx->lifetime = lifetime;
}
int main(int args, char* argv[]) {
struct context ctx;
ctx.a = atoi(argv[1]);
ctx.b = atoi(argv[2]);
ctx.x = atoi(argv[3]);
ctx.N = atoi(argv[4]);
ctx.p = atof(argv[5]);
ctx.P = atoi(argv[6]);
double time;
if (ctx.P != 0) {
time = runner_run(parallel_walk, &ctx, "parallel walk");
} else {
time = runner_run(not_parallel_walk, &ctx, "not parallel walk");
}
FILE* out = fopen("stats.txt", "a+");
fprintf(out,
"%f %f %lfs %d %d %d %d %lf %d\n",
(ctx.result + ctx.N) / (double) (2 * ctx.N),
ctx.lifetime / (double) ctx.N,
time,
ctx.a,
ctx.b,
ctx.x,
ctx.N,
ctx.p,
ctx.P);
fclose(out);
return 0;
}
|
DataAbstract.h |
/*****************************************************************************
*
* Copyright (c) 2003-2018 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/
#ifndef __ESCRIPT_DATAABSTRACT_H__
#define __ESCRIPT_DATAABSTRACT_H__
#include "system_dep.h"
#include "DataTypes.h"
#include "DataVector.h"
#include "FunctionSpace.h"
#include <boost/scoped_ptr.hpp>
#include "DataException.h"
#include <string>
#include <fstream>
#include <vector>
#include "Pointers.h"
namespace escript {
/**
\brief
DataAbstract provides an abstract interface for the class of containers
which hold ESyS data.
Description:
DataAbstract provides an abstract interface for the class of containers
which hold ESyS data. The container may be thought of as a 2 dimensional
array of data points where one dimension corresponds to the number of samples
and the other to the number of data points per sample as defined by the function
space associated with each Data object. The data points themselves are arrays of
reals or complexes of rank 0-4.
*/
class DataAbstract;
typedef POINTER_WRAPPER_CLASS(DataAbstract) DataAbstract_ptr;
typedef POINTER_WRAPPER_CLASS(const DataAbstract) const_DataAbstract_ptr;
class DataReady;
typedef POINTER_WRAPPER_CLASS(DataReady) DataReady_ptr;
typedef POINTER_WRAPPER_CLASS(const DataReady) const_DataReady_ptr;
class ESCRIPT_DLL_API DataAbstract : public REFCOUNT_BASE_CLASS(DataAbstract)
{
public:
typedef DataTypes::ShapeType ShapeType;
/**
\brief Return shared pointer managing this object.
If there is not already a shared pointer managing this object then create one.
Once a shared pointer is created for an object, the deallocation of the object
must be handled by shared_ptr.
\warning So, do not call this on an automatic object.
Do not call this in a method where you do not pass the shared_pointer out and
you need the object to outlast the method.
Note: This is _not_ equivalent to weak_ptr::lock.
*/
DataAbstract_ptr getPtr();
const_DataAbstract_ptr getPtr() const;
/**
\brief
Constructor for DataAbstract.
\param what - Input - The functionspace to use.
\param shape - Input - Shape of each data value.
\param isDataEmpty - Input - Is this an instance of DataEmpty (for internal use only)
\param isCplx - Input - True if the data points hold complex values rather than reals.
*/
DataAbstract(const FunctionSpace& what, const ShapeType& shape, bool isDataEmpty=false,bool isCplx=false);
/**
\brief
Destructor for DataAbstract.
*/
virtual
~DataAbstract();
/**
\brief
Write the data as a string.
*/
virtual
std::string
toString() const = 0;
/**
\brief Return a deep copy of the current object.
*/
virtual
DataAbstract*
deepCopy() const =0 ;
/**
\brief Return an object with the same type, domain (and tags if appropriate)
as this, but all values are zeroed.
*/
virtual
DataAbstract*
zeroedCopy() const =0 ;
/**
\brief Return a data object with all points resolved.
*/
virtual
DataReady_ptr
resolve()=0;
/**
\brief
dumps the object into a netCDF file
*/
virtual
void
dump(const std::string fileName) const;
/**
\brief
Return the number of data points per sample.
*/
int
getNumDPPSample() const;
/**
\brief
Return the number of samples.
*/
int
getNumSamples() const;
bool
hasNoSamples() const
{
return getNumSamples()==0;
}
/**
\brief
Return the shape information for the point data.
The omission of a non-constant form is deliberate.
*/
const DataTypes::ShapeType&
getShape() const;
/**
\brief
Return the rank information for the point data.
*/
unsigned int
getRank() const;
/**
\brief
Return the offset for the given sample. This returns the offset for the given
point into the container holding the point data.
\param sampleNo - Input - sample number.
\param dataPointNo - Input - data point number.
*/
virtual
DataTypes::RealVectorType::size_type
getPointOffset(int sampleNo,
int dataPointNo) const = 0;
/**
\brief
Return the number of doubles stored for this Data object.
*/
virtual
DataTypes::RealVectorType::size_type
getLength() const = 0;
/**
\brief
Return the real sample data for the given tag key.
NB: If the data isn't tagged an exception will be thrown.
*/
virtual
DataTypes::real_t*
getSampleDataByTag(int tag, DataTypes::real_t dummy=0);
/**
\brief
Return the complex sample data for the given tag key.
NB: If the data isn't tagged an exception will be thrown.
*/
virtual
DataTypes::cplx_t*
getSampleDataByTag(int tag, DataTypes::cplx_t dummy);
/**
\brief Return number of tagged values stored in the data object
\warning results are only meaningful for DataTagged. All other types return 0.
This functionality is only currently used by reducers and should
not be exposed to Python without making it more generally applicable
*/
virtual
size_t
getTagCount() const;
/**
\brief
Check this and the given RHS operands are compatible. Throws
an exception if they aren't.
\param right - Input - The right hand side.
*/
void
operandCheck(const DataAbstract& right) const;
/**
\brief
Return true if a valid sample point number.
*/
bool
validSamplePointNo(int samplePointNo) const;
/**
\brief
Return true if a valid sample number.
*/
bool
validSampleNo(int sampleNo) const;
/**
\brief
Return the function space associated with this Data object.
*/
const
FunctionSpace&
getFunctionSpace() const;
/**
\brief
Return the given slice from this object.
NB: The caller is responsible for managing the object created.
*/
virtual
DataAbstract*
getSlice(const DataTypes::RegionType& region) const = 0;
/**
\brief
setTaggedValue
Description:
Assign the given value to the given tag.
NB: If the data isn't tagged an exception will be thrown.
\param tagKey - Input - Integer key.
\param pointshape - Input - the shape of the value parameter.
\param value - Input - vector to copy data value from
\param dataOffset - Input - Offset within value to begin copying from
The final parameter is to allow for the case where the vector contains
multiple data values.
*/
virtual
void
setTaggedValue(int tagKey,
const DataTypes::ShapeType& pointshape,
const DataTypes::RealVectorType& value,
int dataOffset=0);
virtual
void
setTaggedValue(int tagKey,
const DataTypes::ShapeType& pointshape,
const DataTypes::CplxVectorType& value,
int dataOffset=0);
/**
\brief
Copy a double value to the data point dataPointNo of sample sampleNo in this object.
Description:
Copy a double value to the data point dataPointNo of sample sampleNo in this object.
\param sampleNo Input - sample number
\param dataPointNo Input - data point of the sample
\param value Input - new values for the data point
*/
virtual void
copyToDataPoint(const int sampleNo, const int dataPointNo, const DataTypes::real_t value);
virtual void
copyToDataPoint(const int sampleNo, const int dataPointNo, const DataTypes::cplx_t value);
/**
\brief
Copy the array object to the data point dataPointNo of sample sampleNo in this object.
\param sampleNo Input - sample number
\param dataPointNo Input - data point of the sample
\param value Input - new values for the data point
*/
virtual void
copyToDataPoint(const int sampleNo, const int dataPointNo, const WrappedArray& value);
/**
\brief
Return the tag number associated with the given data-point number.
If the object cannot be referenced by tag numbers, an exception
will be thrown.
*/
virtual
int
getTagNumber(int dpno);
/**
\brief
Computes a symmetric matrix (A + AT) / 2
\param ev - Output - a symmetric matrix
*/
virtual void
symmetric(DataAbstract* ev);
/**
\brief
Computes a antisymmetric matrix (A - AT) / 2
\param ev - Output - a nonsymmetric matrix
*/
virtual void
antisymmetric(DataAbstract* ev);
/**
\brief
Computes a symmetric matrix (A + A*) / 2
\param ev - Output - an hermitian matrix
*/
virtual void
hermitian(DataAbstract* ev);
/**
\brief
Computes a antisymmetric matrix (A - A*) / 2
\param ev - Output - an antihermitian matrix
*/
virtual void
antihermitian(DataAbstract* ev);
/**
\brief
Computes the trace of a matrix
\param ev - Output - the trace of a matrix
\param axis_offset
*/
virtual void
trace(DataAbstract* ev, int axis_offset);
/**
\brief
Transpose each data point of this Data object around the given axis.
\param ev - Output - the transpose of a matrix
\param axis_offset
*/
virtual void
transpose(DataAbstract* ev, int axis_offset);
/**
\brief
swaps components axis0 and axis1
\param ev - Output - swapped components
\param axis0
\param axis1
*/
virtual void
swapaxes(DataAbstract* ev, int axis0, int axis1);
/**
\brief
solves the eigenvalue problem this*V=ev*V for the eigenvalues ev
\param ev - Output - eigenvalues in increasing order at each data point
*/
virtual void
eigenvalues(DataAbstract* ev);
/**
\brief invert square matrices
\param out - Where to store the results
\return errorcode (0 indicates success)
*/
virtual int
matrixInverse(DataAbstract* out) const;
/**
\brief
sets values to zero
*/
virtual void
setToZero();
/**
\brief
solves the eigenvalue problem this*V=ev*V for the eigenvalues ev and eigenvectors V
\param ev - Output - eigenvalues in increasing order at each data point
\param V - Output - corresponding eigenvectors. They are normalized such that their length is one
and the first nonzero component is positive.
\param tol - Input - eigenvalue with relative distance tol are treated as equal.
*/
virtual void
eigenvalues_and_eigenvectors(DataAbstract* ev,DataAbstract* V,const double tol=1.e-13);
/**
\brief
reorders data sample ordered by reference_ids to the ordering of the functions space
\param reference_ids - Input - reference_ids used for current ordering
*/
virtual void
reorderByReferenceIDs(DataTypes::dim_t *reference_ids);
/**
\brief
Return the number of values in the shape for this object.
*/
unsigned int
getNoValues() const;
bool isLazy() const; // a test to determine if this object is an instance of DataLazy
virtual
bool
isConstant() const {return false;}
virtual
bool
isExpanded() const {return false;}
/**
\brief
Return true if this Data is expanded or resolves to expanded.
That is, if it has a separate value for each datapoint in the sample.
*/
virtual
bool
actsExpanded() const {return false;}
virtual
bool
isTagged() const {return false;}
bool isEmpty() const; // a fast test to determine if this object is an instance of DataEmpty
/**
\brief true if the components of datapoints are complex
*/
bool isComplex() const;
#ifdef SLOWSHARECHECK
// For this to be threadsafe, we need to be sure that this is the
// only way shared-ness is tested.
/**
\brief Is this object owned by more than one Data object
*/
bool
isShared() const
{
bool shared=false;
#pragma omp critical // because two threads could try
{ // this check at the same time
try // and shared_from_this increments count
{
shared=shared_from_this().use_count()>2;
}
catch (...)
{
}
}
return shared;
}
#endif
#ifdef EXWRITECHK
bool exclusivewritecalled; // used to check for some potential programming faults
// involving shared data.
// This flag only asserts that exclusive write has been called
// on this object, it does not definitively guarantee that
// sharing has not occurred since that call
// This flag is for internal use only may be removed without warning
#endif
/*
* Make the object complex
*/
virtual void complicate();
protected:
friend class DataLazy;
//
// The number of samples in this Data object.
// This is derived directly from the FunctionSpace.
int m_noSamples;
//
// The number of data points per sample in this Data object.
// This is derived directly from the FunctionSpace.
int m_noDataPointsPerSample;
//
// is the data made of complex components
bool m_iscompl;
private:
//
// A FunctionSpace which provides a description of the data associated
// with this Data object.
FunctionSpace m_functionSpace;
//
// The shape of the points stored in this view
DataTypes::ShapeType m_shape;
//
// The number of values in each point
unsigned int m_novalues;
//
// The rank of the points stored in this view
unsigned int m_rank;
//
// Is this an instance of DataEmpty?
bool m_isempty;
};
// Fast check: true only when this object is an instance of DataEmpty.
inline bool DataAbstract::isEmpty() const
{
  return m_isempty;
}
// A data-point index is valid when it lies in [0, m_noDataPointsPerSample).
inline bool DataAbstract::validSamplePointNo(int samplePointNo) const
{
  return (samplePointNo >= 0) && (samplePointNo < m_noDataPointsPerSample);
}
// A sample index is valid when it lies in [0, m_noSamples).
inline bool DataAbstract::validSampleNo(int sampleNo) const
{
  return (sampleNo >= 0) && (sampleNo < m_noSamples);
}
// Number of data points per sample; the query is meaningless for DataEmpty.
inline int DataAbstract::getNumDPPSample() const
{
  if (isEmpty())
    throw DataException("Error - Operations (getNumDPPSample) not permitted on instances of DataEmpty.");
  return m_noDataPointsPerSample;
}
// Number of samples held; the query is meaningless for DataEmpty.
inline int DataAbstract::getNumSamples() const
{
  if (isEmpty())
    throw DataException("Error - Operations (getNumSamples) not permitted on instances of DataEmpty.");
  return m_noSamples;
}
// The FunctionSpace describing how the data is attached to the domain.
inline const FunctionSpace& DataAbstract::getFunctionSpace() const
{
  return m_functionSpace;
}
// Shape of a single data point; not defined for DataEmpty.
inline const DataTypes::ShapeType& DataAbstract::getShape() const
{
  if (isEmpty())
    throw DataException("Error - Operations (getShape) not permitted on instances of DataEmpty.");
  return m_shape;
}
// Rank (number of dimensions) of a single data point; not defined for DataEmpty.
inline unsigned int DataAbstract::getRank() const
{
  if (isEmpty())
    throw DataException("Error - Operations (getRank) not permitted on instances of DataEmpty.");
  return m_rank;
}
// Number of scalar values within one data point; not defined for DataEmpty.
inline unsigned int DataAbstract::getNoValues() const
{
  if (isEmpty())
    throw DataException("Error - Operations (getNoValues) not permitted on instances of DataEmpty.");
  return m_novalues;
}
} // end of namespace
#endif // __ESCRIPT_DATAABSTRACT_H__
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the storage_class phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typdef declarations.
*/
/*
A node of the color description tree.  Each node represents a cube in
RGB(A) space; the n1/n2/Sr,Sg,Sb/E statistics described in the file
header live here.
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent, /* node of the enclosing (larger) cube */
*child[16]; /* sub-cubes: up to 4 id bits (R,G,B + optional alpha) */
MagickSizeType
number_unique; /* n2: pixels classified exactly at this node */
DoublePixelPacket
total_color; /* Sr/Sg/Sb(/Sa) sums of pixels classified here */
MagickRealType
quantize_error; /* E: summed squared distance from the cube center */
size_t
color_number, /* colormap index assigned to this node */
id, /* child slot occupied within the parent */
level; /* depth of this node in the tree */
} NodeInfo;
/*
One block of preallocated NodeInfo structures; blocks are chained so
nodes can be allocated cheaply and freed all at once.
*/
typedef struct _Nodes
{
NodeInfo
*nodes; /* contiguous block of preallocated nodes */
struct _Nodes
*next; /* next allocation block in the chain */
} Nodes;
/*
Working state for quantization: the color tree plus the mutable search
state used by ClosestColor() and the dithering machinery.
*/
typedef struct _CubeInfo
{
NodeInfo
*root; /* root of the color description tree */
size_t
colors, /* colors defined so far */
maximum_colors; /* requested maximum number of colors */
ssize_t
transparent_index; /* colormap slot for transparency; -1 when unset */
MagickSizeType
transparent_pixels; /* count of fully transparent pixels */
DoublePixelPacket
target; /* color currently being searched for by ClosestColor() */
MagickRealType
distance, /* best squared distance found so far for `target` */
pruning_threshold, /* Ep: current pruning threshold (see file header) */
next_threshold; /* minimum E among surviving nodes, next Ep */
size_t
nodes, /* total nodes allocated in the tree */
free_nodes, /* nodes remaining in the current allocation block */
color_number; /* colormap index of the closest color found */
NodeInfo
*next_node; /* next unused node in the current block */
Nodes
*node_queue; /* chain of node allocation blocks */
MemoryInfo
*memory_info; /* NOTE(review): appears to back `cache` below -- confirm */
ssize_t
*cache; /* NOTE(review): presumably maps quantized colors to indexes -- confirm */
DoublePixelPacket
error[ErrorQueueLength]; /* recent error terms (used while dithering) */
MagickRealType
weights[ErrorQueueLength]; /* weights applied to the error queue */
QuantizeInfo
*quantize_info; /* caller-supplied quantization settings */
MagickBooleanType
associate_alpha; /* fold alpha into the color channels? */
ssize_t
x,
y; /* current pixel position (used while dithering) */
size_t
depth; /* maximum depth of the color tree */
MagickOffsetType
offset; /* NOTE(review): progress offset -- confirm */
MagickSizeType
span; /* NOTE(review): total pixel count for progress -- confirm */
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *info;

  /*
    Allocate and default-initialize the structure; allocation failure is
    fatal, matching the rest of MagickCore.
  */
  info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(info);
  if (image_info == (ImageInfo *) NULL)
    return(info);
  /*
    Inherit dithering and verbosity preferences from the image info; the
    "dither" image option, when present, selects the dither method.
  */
  info->dither=image_info->dither;
  {
    const char
      *dither_option;

    dither_option=GetImageOption(image_info,"dither");
    if (dither_option != (const char *) NULL)
      info->dither_method=(DitherMethod) ParseCommandOption(
        MagickDitherOptions,MagickFalse,dither_option);
  }
  info->measure_error=image_info->verbose;
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  Load `pixel` into `alpha_pixel`, pre-multiplying the color channels by
  alpha when alpha association is enabled and the pixel is not fully opaque.
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
  MagickRealType
    scale;

  alpha_pixel->index=0;
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->opacity == OpaqueOpacity))
    {
      /* No pre-multiplication: copy the channels through unchanged. */
      alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
      alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
      alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
      return;
    }
  /* Opacity is inverted alpha, hence QuantumRange-opacity. */
  scale=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
  alpha_pixel->red=scale*GetPixelRed(pixel);
  alpha_pixel->green=scale*GetPixelGreen(pixel);
  alpha_pixel->blue=scale*GetPixelBlue(pixel);
}
/*
  Select the child slot for `pixel` at tree level `index`: bit `index` of
  each 8-bit channel contributes one bit of the id (red -> bit 0,
  green -> bit 1, blue -> bit 2, and opacity -> bit 3 when alpha is
  associated), yielding an id in [0,7] or [0,15].
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) &
0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
0x1) << 3;
return(id);
}
/*
  Compare two pixels channel by channel; opacity only participates when
  the image has a matte channel.
*/
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  if (GetPixelRed(p) != GetPixelRed(q))
    return(MagickFalse);
  if (GetPixelGreen(p) != GetPixelGreen(q))
    return(MagickFalse);
  if (GetPixelBlue(p) != GetPixelBlue(q))
    return(MagickFalse);
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
/* Remember the original colorspace so it can be restored before returning. */
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/* DefineImageColormap() re-counts colors as it walks the pruned tree. */
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if ((cube_info->quantize_info->dither != MagickFalse) &&
(cube_info->quantize_info->dither_method != NoDitherMethod))
(void) DitherImage(image,cube_info);
else
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
/* A failure in any row aborts the remaining iterations cheaply. */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
/* Per-thread copy: ClosestColor() mutates search state (target,
distance, color_number), which must not be shared across rows. */
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
/* Coalesce a run of identical pixels so the tree walk and the
closest-color search happen only once for the whole run. */
for (count=1; (x+count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,q,q+count) == MagickFalse)
break;
AssociateAlphaPixel(&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
/* Seed the search with a distance larger than any possible
squared RGBA distance so the first candidate always wins. */
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
(QuantumRange+1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+x+i,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
q++;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AssignImageColors)
#endif
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
/* Force the two colormap entries to pure black/white, ordered so the
darker input color stays the darker output color. */
intensity=0.0;
if ((image->colors > 1) &&
(GetPixelLuma(image,image->colormap+0) >
GetPixelLuma(image,image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing It updates the following data for each such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    use_alpha;
  /*
    Alpha participates in classification only when the image has a matte
    channel; a 2-color grayscale quantization target disables it.
  */
  use_alpha=image->matte;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    use_alpha=MagickFalse;
  cube_info->associate_alpha=use_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
  CacheView
    *image_view;
  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;
  MagickBooleanType
    proceed;
  MagickRealType
    bisect;
  NodeInfo
    *node_info;
  size_t
    count,
    id,
    index,
    level;
  ssize_t
    y;
  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
    The image is scanned in two passes: the first descends to MaxTreeDepth
    until the color budget is exhausted, the second continues at the (possibly
    pruned) cube_info->depth.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  midpoint.index=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree; runs of identical
        pixels are classified once with weight `count'.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;
        /*
          Each level halves the RGB(A) cube; the id bits select the octant.
        */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /*
          Color budget exhausted: prune the tree and fall through to the
          depth-limited second pass.
        */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify any remaining rows at the constrained tree depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
          pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type QuantizeInfo.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone;
  /*
    Allocate and default-initialize a fresh QuantizeInfo, then copy the
    source settings over only when a source structure was supplied.
  */
  clone=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone));
  if (clone == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone->number_colors=quantize_info->number_colors;
      clone->tree_depth=quantize_info->tree_depth;
      clone->dither=quantize_info->dither;
      clone->dither_method=quantize_info->dither_method;
      clone->colorspace=quantize_info->colorspace;
      clone->measure_error=quantize_info->measure_error;
    }
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree from which the traversal starts.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;
  size_t
    number_children;
  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;
      register DoublePixelPacket
        *magick_restrict q;
      register MagickRealType
        alpha,
        beta,
        distance;
      register PixelPacket
        *magick_restrict p;
      /*
        Determine if this color is "closest".  The candidate colormap entry
        (p) is compared against the target color (cube_info->target, q);
        when alpha is associated both sides are scaled by their normalized
        alpha so transparent colors compare on premultiplied components.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      /*
        Accumulate squared distance one channel at a time, bailing out as
        soon as the partial sum exceeds the best distance found so far.
        On success the best match is recorded in cube_info->distance and
        cube_info->color_number.
      */
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      /* alpha term is unscaled, unlike the color terms */
                      pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    compress_info;
  /*
    Remove duplicate and unused colormap entries by re-quantizing the image
    to exactly the number of colors it already contains.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  GetQuantizeInfo(&compress_info);
  compress_info.number_colors=image->colors;
  compress_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&compress_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero. DefineImageColormap() returns the number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree from which the traversal starts.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;
  size_t
    number_children;
  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;
      register PixelPacket
        *magick_restrict q;
      /*
        Colormap entry is defined by the mean color in this cube:
        total_color holds QuantumScale-normalized sums, so multiplying by
        1/number_unique and QuantumRange recovers the average component.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;
          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              double
                gamma;
              /*
                Partially transparent entry: un-premultiply the color sums
                by the entry's coverage (gamma) before storing.
              */
              gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
              gamma=PerceptibleReciprocal(gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.blue)));
              /*
                Track the most-populated transparent entry for later use as
                the image's transparent index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a color cube.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;
  /*
    Release color cube tree storage: free each bank of nodes on the queue,
    then the dither cache, the quantize info, and the cube itself.  A while
    loop is used so an empty node queue is not dereferenced (the former
    do/while read node_queue->next before any NULL check).
  */
  while (cube_info->node_queue != (Nodes *) NULL)
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  }
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Invalidate the signature, then release the structure; always returns
    NULL so the caller can overwrite its pointer in one statement.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;
  ssize_t
    number_threads;
  /*
    Free each per-thread error-diffusion buffer, then the pointer table;
    returns NULL for convenient assignment by the caller.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  return((DoublePixelPacket **) RelinquishMagickMemory(pixels));
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **set;
  size_t
    n,
    number_threads;
  /*
    Allocate one buffer of 2*count DoublePixelPackets per worker thread
    (two scanlines of diffusion error: current and previous row).  On any
    failure the partially built set is destroyed and NULL is returned.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  set=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*set));
  if (set == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(set,0,number_threads*sizeof(*set));
  for (n=0; n < number_threads; n++)
  {
    set[n]=(DoublePixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**set));
    if (set[n] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(set));
  }
  return(set);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
/*
  Map a pixel color to an index in the closest-color cache: each channel
  is reduced to its (8-CacheShift) high bits and the reduced channels are
  packed into adjacent bit fields (red lowest, then green, blue, alpha).
*/
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
  ssize_t
    offset;
  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  /* alpha participates in the cache key only when it is associated */
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"
  CacheView
    *image_view;
  const char
    *artifact;
  double
    amount;
  DoublePixelPacket
    **pixels;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Distribute quantization error using Floyd-Steinberg (serpentine scan,
    weights 7/16 right, 3/16 down-left, 5/16 down, 1/16 down-right).
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  exception=(&image->exception);
  status=MagickTrue;
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    CubeInfo
      cube;
    DoublePixelPacket
      *current,
      *previous;
    register IndexPacket
      *magick_restrict indexes;
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    size_t
      index;
    ssize_t
      v;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    cube=(*cube_info);
    /*
      Two error scanlines alternate roles each row; v flips the horizontal
      step for the serpentine (boustrophedon) traversal.
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;
      register ssize_t
        i;
      ssize_t
        u;
      u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(&cube,q+u,&pixel);
      /*
        Add in the diffused error from the already-visited neighbors.
      */
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=7.0*amount*current[u-v].opacity/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=previous[u+v].opacity/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=5.0*amount*previous[u].opacity/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=3.0*amount*previous[u-v].opacity/16;
            }
        }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;
          register size_t
            node_id;  /* renamed: previously shadowed the thread id above */
          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
            1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(indexes+u,index);
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q+u,image->colormap+index);
          if (cube.associate_alpha != MagickFalse)
            SetPixelOpacity(q+u,image->colormap[index].opacity);
        }
      /*
        Store the error.
      */
      AssociateAlphaPixel(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].opacity=pixel.opacity-color.opacity;
    }
    /*
      Sync and report progress once per row (previously done per pixel,
      which synced the same region columns-times over).
    */
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /* propagate row-fetch/sync failures instead of always reporting success */
  return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction)
{
  /*
    Recursively generate a Hilbert curve of the given level: at the base
    level emit three dither steps; otherwise recurse into four rotated
    sub-curves joined by connecting steps.  Gravity constants encode the
    travel direction; cube_info carries the current cursor position.
  */
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        break;
      }
      default:
        break;
    }
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"
  DoublePixelPacket
    color,
    pixel;
  MagickBooleanType
    proceed;
  register CubeInfo
    *p;
  size_t
    index;
  /*
    Dither the single pixel at the Hilbert-curve cursor (p->x,p->y) — if it
    lies inside the image — then advance the cursor in `direction'.  The
    curve may wander outside the image; out-of-bounds positions are skipped.
  */
  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      ExceptionInfo
        *exception;
      register IndexPacket
        *magick_restrict indexes;
      register PixelPacket
        *magick_restrict q;
      register ssize_t
        i;
      /*
        Distribute error.
      */
      exception=(&image->exception);
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickFalse);
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      AssociateAlphaPixel(cube_info,q,&pixel);
      /*
        Add in the weighted errors from the last ErrorQueueLength pixels
        visited along the curve (exponential-decay weights).
      */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.opacity+=p->weights[i]*p->error[i].opacity;
      }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;
          register size_t
            id;
          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) (1*p->cache[i]);
      if (image->storage_class == PseudoClass)
        *indexes=(IndexPacket) index;
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q,image->colormap+index);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelOpacity(q,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* advance the curve cursor; any other direction leaves it in place */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  register ssize_t
    bits,
    extent;
  size_t
    depth;
  /*
    Floyd-Steinberg handles every method except Riemersma.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve.  The curve order
    (depth) is the number of bits needed to cover the larger image axis.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  extent=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  for (bits=extent; bits != 0; bits>>=1)
    depth++;
  if ((ssize_t) (1L << depth) < extent)
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
MagickRealType
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither == MagickFalse)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*
length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Release the queue header as well -- it was leaked here before.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /*
    Pop the next preallocated node, zero it, and wire it to its parent.
  */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    gamma,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    Quantization error is only meaningful for colormapped images; a
    DirectClass image reports zero error.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /*
    Three channels (red, green, blue) contribute to each pixel's error.
  */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /*
            Weight each side of the comparison by its alpha coverage.
          */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      /*
        Accumulate per-channel |pixel - colormap entry| distances: a plain
        sum for the mean-per-pixel metric, a squared sum for the
        mean-square metric, and the running maximum.
      */
      distance=fabs((double) (alpha*GetPixelRed(p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize: gamma is a safe reciprocal of the sample count, and
    QuantumScale^2 maps the squared error into the [0,1] range.
  */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Initialize the QuantizeInfo structure to its documented defaults.
    Validate the pointer before doing anything else, consistent with the
    other entry points in this file (the trace log previously ran first).
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  /*
    Convenience wrapper: posterize all default channels of the image.
  */
  return(PosterizeImageChannel(image,DefaultChannels,levels,dither));
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
/* Map a quantum onto the nearest of `levels' evenly spaced values. */
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    For palette images, posterize each selected channel of the colormap
    entries in place first.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=PosterizePixel(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=PosterizePixel(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
    }
  /*
    Posterize image: quantize each selected channel of every pixel,
    row by row (rows are processed in parallel when OpenMP is enabled).
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* Another thread may already have failed; skip further work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      /* Opacity is only posterized when the image actually has alpha. */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      /* The index channel carries black ink for CMYK images only. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Finish by quantizing down to at most levels^3 colors (optionally
    dithered) with the standard color-reduction machinery.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    id;

  size_t
    number_children;

  /*
    Depth-first: prune the entire subtree below this node first so
    statistics bubble up one level at a time.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[id]);
  /*
    Fold this node's accumulated color statistics into its parent, then
    detach it from the tree.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.opacity+=node_info->total_color.opacity;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Recurse to the bottom of the tree, then collapse every node sitting at
    the cube's maximum depth into its parent.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[id]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Recurse first, then merge any node deeper than the cube's configured
    depth into its parent.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[id]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Clamp the requested colormap size; 0 means "use the maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    A gray image without alpha is converted straight to a PseudoClass
    grayscale image, bypassing the color cube.
  */
  if (image->matte == MagickFalse)
    {
      if (SetImageGray(image,&image->exception) != MagickFalse)
        (void) SetGrayscaleImage(image);
    }
  /*
    Already palette-based with few enough colors: only an optional
    colorspace transform remains.
  */
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    {
      if ((quantize_info->colorspace != UndefinedColorspace) &&
          (quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace(image,quantize_info->colorspace);
      return(MagickTrue);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* Dithering and alpha each shave one level (within bounds). */
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;
      if ((image->matte != MagickFalse) && (depth > 5))
        depth--;
      /* Gray images get the full tree depth for maximum precision. */
      if (SetImageGray(image,&image->exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image if it contains more than the
        maximum, otherwise we can disable dithering to improve the performance.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      else
        cube_info->quantize_info->dither_method=NoDitherMethod;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested colormap size; 0 means "use the maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither != MagickFalse)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(&images->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  /*
    Pass 1: classify the colors of every image into the shared cube.  The
    per-image progress monitor is suspended during classification and
    restored before ANY exit from the loop (previously the error path
    broke out without restoring it).
  */
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,&image->exception);
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    if (status == MagickFalse)
      break;
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence, then assign the
        shared colormap to every image (pass 2).  As above, the monitor is
        restored before any exit from the loop.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info);
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        if (status == MagickFalse)
          break;
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,
  MagickRealType *quantize_error)
{
  register ssize_t
    child;

  size_t
    count,
    number_children;

  /*
    Pre-order traversal: record this node's quantization error at offset,
    then append each populated child subtree after it.  Returns how many
    entries were written.
  */
  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  count=1;
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) number_children; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      count+=QuantizeErrorFlatten(cube_info,node_info->child[child],
        offset+count,quantize_error);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Prune the subtree below this node first.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[id]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    {
      PruneChild(cube_info,node_info);
      return;
    }
  /*
    Node survives this pass: count it if it defines a unique color, and
    track the smallest surviving error as the next pruning threshold.
  */
  if (node_info->number_unique > 0)
    cube_info->colors++;
  if (node_info->quantize_error < cube_info->next_threshold)
    cube_info->next_threshold=node_info->quantize_error;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
  MagickRealType
    *p,
    *q;

  /*
    qsort() comparator for quantization errors.  The epsilon equality test
    is applied first so the ordering is symmetric: previously two values
    within MagickEpsilon of each other compared as "greater" or "equal"
    depending on argument order, an inconsistency qsort() need not
    tolerate.
  */
  p=(MagickRealType *) error_p;
  q=(MagickRealType *) error_q;
  if (fabs((double) (*q-*p)) <= MagickEpsilon)
    return(0);
  if (*p > *q)
    return(1);
  return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  /*
    When there are many more nodes than target colors, pre-compute a
    starting threshold from the sorted per-node quantization errors so the
    first Reduce() passes remove nodes in bulk.
  */
  if (cube_info->colors > cube_info->maximum_colors)
    {
      MagickRealType
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors.
      */
      quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (MagickRealType *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
            MagickRealTypeCompare);
          /*
            Choose the error value at ~110% of the color budget as the
            initial threshold.
          */
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(MagickRealType *) RelinquishMagickMemory(
            quantize_error);
        }
    }
  /*
    Repeatedly prune the tree, raising the threshold each pass, until the
    surviving color count fits the colormap budget (or the progress
    monitor aborts).
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  /*
    Build a color cube populated with the reference image's colors.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Assign each pixel of the target image its closest reference color.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Remap every image in the sequence to the reference palette,
        stopping at the first failure.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      while (image != (Image *) NULL)
      {
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  const PixelPacket
    *color_1,
    *color_2;

  double
    intensity_1,
    intensity_2;

  /*
    qsort() comparator: order colormap entries by pixel intensity.  A
    three-way comparison is used instead of the previous subtraction so the
    result cannot overflow or truncate the int return value for large
    quantum depths (HDRI / Q32 builds).
  */
  color_1=(const PixelPacket *) x;
  color_2=(const PixelPacket *) y;
  intensity_1=(double) PixelPacketIntensity(color_1);
  intensity_2=(double) PixelPacketIntensity(color_2);
  if (intensity_1 < intensity_2)
    return(-1);
  if (intensity_1 > intensity_2)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  SetGrayscaleImage() converts an image to a compact PseudoClass grayscale
  image: it builds a colormap of the distinct gray levels, sorts it by
  intensity, merges duplicate entries, and rewrites all pixel indexes.
*/
static MagickBooleanType SetGrayscaleImage(Image *image)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  PixelPacket
    *colormap;
  register ssize_t
    i;
  ssize_t
    *colormap_index,
    j,
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  exception=(&image->exception);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace);
  /*
    colormap_index[] maps a scaled intensity to its colormap slot; -1 marks
    an intensity that has not been assigned a slot yet.
  */
  if (image->storage_class == PseudoClass)
    colormap_index=(ssize_t *) AcquireQuantumMemory(image->colors+1,
      sizeof(*colormap_index));
  else
    colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize+1,
      sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass source: discover gray levels and build the colormap on
        the fly.  memset with -1 sets every byte to 0xFF, i.e. every
        ssize_t entry to -1.
      */
      (void) memset(colormap_index,(-1),MaxColormapSize*
        sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;
        register PixelPacket
          *magick_restrict q;
        register ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;
          /* image is gray at this point, so red carries the intensity */
          intensity=ScaleQuantumToMap(GetPixelRed(q));
          /*
            Double-checked locking: the unsynchronized test avoids the
            critical section for intensities already registered; the test
            is repeated inside the critical section before inserting.
          */
          if (colormap_index[intensity] < 0)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
            if (colormap_index[intensity] < 0)
              {
                colormap_index[intensity]=(ssize_t) image->colors;
                image->colormap[image->colors].red=GetPixelRed(q);
                image->colormap[image->colors].green=GetPixelGreen(q);
                image->colormap[image->colors].blue=GetPixelBlue(q);
                image->colors++;
              }
            }
          SetPixelIndex(indexes+x,colormap_index[intensity]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Stash each entry's original position in its (otherwise unused) opacity
    channel so the mapping survives the intensity sort below.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].opacity=(unsigned short) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
    IntensityCompare);
  colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
    sizeof(*colormap));
  if (colormap == (PixelPacket *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Merge adjacent duplicate colors of the sorted colormap and record, for
    each original slot (recovered from opacity), its new compacted index.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].opacity]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* Rewrite every pixel index through the old-slot -> new-slot table. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;
    register const PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
      SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
        indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,&image->exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
LookupTable.h | #ifndef _LOOKUPTABLE_H_
#define _LOOKUPTABLE_H_
/*
* LookupTable.h:
* Lookup operation, for embeddings
*
* Created on: Apr 22, 2017
* Author: mszhang
*/
#include "SparseParam.h"
#include "MyLib.h"
#include "Alphabet.h"
#include "Node.h"
#include "Graph.h"
#include "ModelUpdate.h"
#include "profiler.h"
// Word-embedding lookup table: wraps a SparseParam matrix indexed by the
// ids of an Alphabet.  Supports random initialization or loading from a
// pre-trained embedding text file (one "word v1 v2 ... vD" line per word).
class LookupTable {
  public:
    PAlphabet elems;  // vocabulary alphabet (word <-> id); not owned
    SparseParam E;    // embedding parameters, nDim x nVSize
    bool bFineTune;   // update embeddings during training?
    int nDim;         // embedding dimension
    int nVSize;       // vocabulary size
    int nUNKId;       // id of the unknown-word token, -1 if absent

    LookupTable() {
        nVSize = 0;
        nDim = 0;
        elems = NULL;
        nUNKId = -1;
        bFineTune = false;
    }

    //random initialization
    inline void initial(PAlphabet alpha, int dim, bool fineTune = true) {
        elems = alpha;
        nVSize = elems->size();
        nUNKId = elems->from_string(unknownkey);
        initialWeights(dim, fineTune);
    }

    //initialization by pre-trained embeddings; returns false on failure
    inline bool initial(PAlphabet alpha, const string& inFile, bool fineTune = true, dtype norm = -1) {
        elems = alpha;
        nVSize = elems->size();
        nUNKId = elems->from_string(unknownkey);
        return initialWeights(inFile, fineTune, norm);
    }

    // Randomly initialize an nDim x nVSize embedding matrix.
    inline void initialWeights(int dim, bool tune) {
        if (nVSize == 0 || (nVSize == 1 && nUNKId >= 0)) {
            std::cout << "please check the alphabet" << std::endl;
            return;
        }
        nDim = dim;
        E.initial(nDim, nVSize);
        E.val.random(sqrt(1.0 / nDim));
        //E.val.norm2one();
        bFineTune = tune;
#if USE_GPU
        E.val.copyFromHostToDevice();
#endif
    }

    // Load pre-trained embeddings from inFile; words absent from the file
    // are back-filled with the unknown vector (or the column average).
    // default should be fineTune, just for initialization
    inline bool initialWeights(const string& inFile, bool tune, dtype norm = -1) {
        if (nVSize == 0 || !elems->is_fixed() || (nVSize == 1 && nUNKId >= 0)) {
            std::cout << "please check the alphabet" << std::endl;
            return false;
        }
        ifstream inf;
        if (inf.is_open()) {
            inf.close();
            inf.clear();
        }
        inf.open(inFile.c_str());
        if (!inf.is_open()) {
            std::cout << "please check the input file" << std::endl;
            return false;
        }
        string strLine, curWord;
        int wordId;
        // Read all non-empty lines up front.
        vector<string> sLines;
        sLines.clear();
        while (1) {
            if (!my_getline(inf, strLine)) {
                break;
            }
            if (!strLine.empty()) {
                sLines.push_back(strLine);
            }
        }
        inf.close();
        if (sLines.size() == 0) {
            return false;
        }
        //find the first line, decide the wordDim;
        vector<string> vecInfo;
        split_bychar(sLines[0], vecInfo, ' ');
        nDim = vecInfo.size() - 1;
        E.initial(nDim, nVSize);
        std::cout << "word embedding dim is " << nDim << std::endl;
        bool bHasUnknown = false;
        unordered_set<int> indexers;
        NRVec<dtype> sum(nDim);
        sum = 0.0;
        int count = 0;
        for (int idx = 0; idx < (int)sLines.size(); idx++) {
            split_bychar(sLines[idx], vecInfo, ' ');
            if (vecInfo.size() != nDim + 1) {
                std::cout << "error embedding file" << std::endl;
                // BUGFIX: skip malformed lines; previously execution fell
                // through and read vecInfo[idy + 1] out of bounds below.
                continue;
            }
            curWord = vecInfo[0];
            //we assume the keys are normalized
            wordId = elems->from_string(curWord);
            if (wordId >= 0) {
                count++;
                if (nUNKId == wordId) {
                    bHasUnknown = true;
                }
                indexers.insert(wordId);
                for (int idy = 0; idy < nDim; idy++) {
                    dtype curValue = atof(vecInfo[idy + 1].c_str());
                    sum[idy] += curValue;
                    E.val[wordId][idy] += curValue;
                }
            }
        }
        if (count == 0) {
            // No overlap with the vocabulary: fall back to random init.
            E.val.random(sqrt(3.0 / nDim));
#if USE_GPU
            E.val.copyFromHostToDevice();
#endif
            std::cout << "find no overlapped lexicons in the embedding file" << std::endl;
            return false;
        }
        if (nUNKId >= 0 && !bHasUnknown) {
            // Unknown token missing from the file: use the column average.
            for (int idx = 0; idx < nDim; idx++) {
                E.val[nUNKId][idx] = sum[idx] / (count + 1);
            }
            indexers.insert(nUNKId);
            count++;
            std::cout << unknownkey << " not found, using averaged value to initialize." << std::endl;
        }
        // Back-fill out-of-vocabulary words with the unknown vector (or the
        // column average when no unknown token exists).
        int oovWords = 0;
        for (int id = 0; id < nVSize; id++) {
            if (indexers.find(id) == indexers.end()) {
                oovWords++;
                for (int idy = 0; idy < nDim; idy++) {
                    E.val[id][idy] = nUNKId >= 0 ? E.val[nUNKId][idy] : sum[idy] / (count + 1);
                }
            }
        }
        std::cout << "OOV num is " << oovWords << ", total num is " << nVSize << ", embedding oov ratio is " << oovWords * 1.0 / nVSize << std::endl;
        std::cout << "unknown id" << nUNKId << std::endl;
        bFineTune = tune;
        if (norm > 0) {
            E.val.norm2one(norm);
        }
#if USE_GPU
        E.val.copyFromHostToDevice();
#endif
        return true;
    }

    // Register E with the optimizer only when fine-tuning is enabled.
    inline void exportAdaParams(ModelUpdate& ada) {
        if (bFineTune) {
            ada.addParam(&E);
        }
    }

    inline int getElemId(const string& strFeat) {
        return elems->from_string(strFeat);
    }

    inline void save(std::ofstream &os) const {
        E.save(os);
        os << bFineTune << std::endl;
        os << nDim << std::endl;
        os << nVSize << std::endl;
        os << nUNKId << std::endl;
    }

    //set alpha directly
    inline void load(std::ifstream &is, PAlphabet alpha) {
        E.load(is);
        is >> bFineTune;
        is >> nDim;
        is >> nVSize;
        is >> nUNKId;
        elems = alpha;
    }
};
// Graph leaf node performing an embedding lookup for a single token.
class LookupNode : public Node {
  public:
    LookupTable* param;  // shared embedding table; not owned
    int xid;             // resolved row id in the table, -1 if unresolved

    LookupNode() {
        xid = -1;
        param = NULL;
        node_type = "lookup";
    }

    inline void setParam(LookupTable* paramInit) {
        param = paramInit;
    }

    inline void clearValue() {
        Node::clearValue();
        xid = -1;
    }

    //notice the output
    //this should be leaf nodes
    void forward(Graph *cg, const string& strNorm) {
        assert(param != NULL);
        int id = param->getElemId(strNorm);
        if (id < 0 && param->nUNKId >= 0) {
            id = param->nUNKId;  // map unseen words to the unknown token
        }
        xid = id;
        if (xid < 0 && param->bFineTune) {
            std::cout << "Caution: unknown words are not modeled !" << std::endl;
        }
        degree = 0;
        cg->addNode(this);
    }

    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // Two lookup nodes are batchable only when they share one table.
    bool typeEqual(PNode other) override {
        if (!Node::typeEqual(other)) {
            return false;
        }
        return param == ((LookupNode*)other)->param;
    }

    size_t typeHashCode() const override {
        return Node::typeHashCode() ^ ::typeHashCode(param);
    }

    // Copy the embedding row into val; unresolved ids yield a zero vector.
    void compute() {
        if (xid < 0) {
            val.zero();
        } else {
            param->E.value(xid, val);
        }
    }

    // Propagate the loss into the table when the row is trainable.
    void backward() {
        assert(param != NULL);
        bool trainable = (xid >= 0 && param->bFineTune) || xid == param->nUNKId;
        if (trainable) {
            param->E.loss(xid, loss);
        }
    }
};
#if USE_GPU
// Batched GPU executor for LookupNode: gathers embedding rows for the whole
// batch with one CUDA kernel and applies dropout via a device-side mask.
class LookupExecute :public Execute {
  public:
    int dim;                // embedding dimension shared by the batch
    Tensor2D drop_mask;     // dropout mask (dim x batch), device memory
    LookupTable *table;     // shared embedding table; not owned
    std::vector<int> xids;  // per-node row ids gathered in forward()

    inline void forward() {
        int count = batch.size();
        drop_mask.init(dim, count);
        CalculateDropMask(count, dim, drop_mask);
        // Collect each node's row id and output buffer for the kernel.
        xids.reserve(count);
        std::vector<dtype*> vals;
        vals.reserve(count);
        for (int idx = 0; idx < count; idx++) {
            LookupNode *n = static_cast<LookupNode*>(batch[idx]);
            xids.push_back(n->xid);
            vals.push_back(n->val.value);
        }
        n3ldg_cuda::LookupForward(xids, table->E.val.value, bTrain,
                drop_mask.value, dynamicDropValue(), count, dim, vals);
#if TEST_CUDA
        // Debug path: recompute on the CPU and verify the device result.
        drop_mask.copyFromDeviceToHost();
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            // NOTE(review): 'batch.at(0) > 0' orders a Node pointer against 0;
            // presumably this was meant to test the drop factor/rate instead —
            // confirm before relying on this TEST_CUDA branch.
            if (batch.at(0) > 0) {
                for (int i = 0; i < count; ++i) {
                    for (int j = 0; j < dim; ++j) {
                        dtype v = drop_mask[j][i];
                        batch[i]->drop_mask[j] = v <= dynamicDropValue() ?
                            0 : 1;
                    }
                }
            }
            batch[idx]->forward_drop(bTrain, drop_factor);
            int xid = static_cast<LookupNode*>(batch[idx])->xid;
            n3ldg_cuda::Assert(batch[idx]->val.verify("lookup forward"));
        }
#endif
    }

    inline void backward() {
        int count = batch.size();
        // Gather per-node gradient buffers, then scatter-add into E.grad.
        std::vector<dtype*> losses;
        losses.reserve(count);
        for (Node *n : batch) {
            losses.push_back(n->loss.value);
        }
        n3ldg_cuda::LookupBackward(xids, table->nUNKId, table->bFineTune,
                losses,
                drop_mask.value,
                dynamicDropValue(),
                count,
                dim,
                table->E.grad.value,
                table->E.dIndexers.value);
#if TEST_CUDA
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->backward_drop();
            batch[idx]->backward();
        }
        n3ldg_cuda::Assert(table->E.grad.verify("lookup backward grad"));
        n3ldg_cuda::Assert(n3ldg_cuda::Verify(table->E.indexers.c_buf(),
            table->E.dIndexers.value,
            table->E.dIndexers.len,
            "lookup backward index"));
#endif
    }
};
#else
class LookupExecute :public Execute {
public:
inline void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
inline void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
};
#endif
// Create the executor for this node; the graph may later merge executors
// whose nodes compare type-equal.
PExecute LookupNode::generate(bool bTrain, dtype cur_drop_factor) {
    LookupExecute* executor = new LookupExecute();
    executor->bTrain = bTrain;
    executor->drop_factor = cur_drop_factor;
    executor->batch.push_back(this);
#if USE_GPU
    executor->table = param;
    executor->dim = dim;
#endif
    return executor;
}
#endif /*_LOOKUPTABLE_H_*/
|
tensor_transpose.h | // Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef _H_TENSOR_TRANSPOSE
#define _H_TENSOR_TRANSPOSE
#include "tensor_desc.h"
#include "uni.h"
#include "thread_affinity.h"
// Typed kernel: repack 'input' into plain NCHW element order in 'output'.
// Accepts 2d/3d/4d descriptors and the source layouts NCHW, NCHWC8,
// NCHWC16, NHWCN8 and NHWC.  Where input and output extents differ, the
// copy is bounded by the smaller extent (channel cut); no padding is
// written here.
template <typename T>
inline static void transformToNCHWKernel(
    TensorDesc inputDesc, const T *input, TensorDesc outputDesc, T *output)
{
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    // Normalize the input descriptor to an (n, c, h, w) quadruple.
    if (tensorIs2d(inputDesc)) {
        CHECK_STATUS(tensor2dGet(inputDesc, &idt, &idf, &in, &iw));
        ic = 1;
        ih = 1;
    } else if (tensorIs3d(inputDesc)) {
        if (inputDesc.df == DF_NHWC) {
            CHECK_STATUS(tensor3dGet(inputDesc, &idt, &idf, &in, &ih, &iw));
            ic = 1;
        } else {
            CHECK_STATUS(tensor3dGet(inputDesc, &idt, &idf, &in, &ic, &ih));
            iw = 1;
        }
    } else if (tensorIs4d(inputDesc)) {
        CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    } else {
        UNI_ERROR_LOG("not support transform %d-dim tensor to NCHW format\n", (int)inputDesc.nDims);
        return;
    }
    // Normalize the output descriptor likewise.
    if (tensorIs3d(outputDesc)) {
        CHECK_STATUS(tensor3dGet(outputDesc, &odt, &odf, &on, &oc, &oh));
        ow = 1;
    } else if (tensorIs4d(outputDesc)) {
        CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    } else {
        UNI_ERROR_LOG("not support transform to %d-dim NCHW tensor\n", (int)outputDesc.nDims);
        return;
    }
    CHECK_REQUIREMENT(idt == odt);
    switch (idf) {
        case DF_NCHW: {
            // Same layout: bulk copy when shapes match, else row-wise copy
            // bounded by the smaller extents.
            if (in == on && ic == oc && ih == oh && iw == ow) {
                if (output != input) {
                    memcpy(output, input, tensorNumBytes(outputDesc));
                }
            } else {
                U32 tileSize = UNI_MIN(iw, ow) * bytesOf(idt);
                for (U32 n = 0; n < on && n < in; n++) {
                    for (U32 c = 0; c < oc && c < ic; c++) {
                        for (U32 h = 0; h < oh && h < ih; h++) {
                            U32 srcIndex = ((n * ic + c) * ih + h) * iw;
                            U32 dstIndex = ((n * oc + c) * oh + h) * ow;
                            memcpy(output + dstIndex, input + srcIndex, tileSize);
                        }
                    }
                }
            }
            break;
        }
        case DF_NCHWC8: {
            // 8-channel interleaved tiles -> planar channels.
            U32 cx = 8;
            U32 ic_a = ic / cx;
            U32 minH = UNI_MIN(oh, ih);
            U32 minC = UNI_MIN(oc, ic);
            for (U32 n = 0; n < on && n < in; n++) {
#ifdef _USE_OPENMP
#pragma omp parallel for num_threads(OMP_NUM_THREADS)
#endif
            for (U32 c = 0; c < minC; c++) {
                for (U32 h = 0; h < minH; h++) {
                    for (U32 w = 0; w < ow && w < iw; w++) {
                        U32 c_a = c / cx;
                        U32 c_b = c % cx;
                        U32 srcIndex = (((n * ic_a + c_a) * ih + h) * iw + w) * cx + c_b;
                        U32 dstIndex = ((n * oc + c) * oh + h) * ow + w;
                        // support channel cut
                        output[dstIndex] = input[srcIndex];
                    }
                }
            }
            }
            break;
        }
        case DF_NCHWC16: {
            // Full 16-wide tiles first, then a narrower tail tile.
            U32 ic16 = ic / 16;
            for (U32 n = 0; n < in; ++n) {
                U32 c = 0;
                for (; c < ic16; ++c) {
                    for (U32 h = 0; h < ih; ++h) {
                        for (U32 w = 0; w < iw; ++w) {
                            for (U32 cc = 0; cc < 16; ++cc) {
                                output[n * ic * ih * iw + (c * 16 + cc) * ih * iw + h * iw + w] =
                                    input[n * ic * ih * iw + c * 16 * ih * iw + (h * iw + w) * 16 + cc];
                            }
                        }
                    }
                }
                c *= 16;
                while (c < ic) {
                    // A 12-channel remainder is stored as an 8-wide tile.
                    U32 cx = ic - c;
                    cx = (cx == 12) ? 8 : cx;
                    for (U32 h = 0; h < ih; ++h) {
                        for (U32 w = 0; w < iw; ++w) {
                            for (U32 cc = 0; cc < cx; ++cc) {
                                output[n * ic * ih * iw + (c + cc) * ih * iw + h * iw + w] =
                                    input[n * ic * ih * iw + c * ih * iw + (h * iw + w) * cx + cc];
                            }
                        }
                    }
                    c += cx;
                }
            }
            break;
        }
        case DF_NHWCN8: {
            // Filter layout: batch packed in groups of 8 (n8 innermost).
            in /= 8;
            for (U32 n = 0; n < in; n++) {
                for (U32 h = 0; h < oh && h < ih; h++) {
                    for (U32 w = 0; w < ow && w < iw; w++) {
                        for (U32 c = 0; c < oc && c < ic; c++) {
                            for (U32 n8 = 0; n8 < 8; n8++) {
                                U32 srcIndex = (((n * ih + h) * iw + w) * ic + c) * 8 + n8;
                                U32 dstIndex = (((n * 8 + n8) * oc + c) * oh + h) * ow + w;
                                output[dstIndex] = input[srcIndex];
                            }
                        }
                    }
                }
            }
            break;
        }
        case DF_NHWC: {
            // Channel-last -> channel-first element shuffle.
            for (U32 n = 0; n < on && n < in; n++) {
                for (U32 c = 0; c < oc && c < ic; c++) {
                    for (U32 h = 0; h < oh && h < ih; h++) {
                        for (U32 w = 0; w < ow && w < iw; w++) {
                            U32 srcIndex = ((n * ih + h) * iw + w) * ic + c;
                            U32 dstIndex = ((n * oc + c) * oh + h) * ow + w;
                            output[dstIndex] = input[srcIndex];
                        }
                    }
                }
            }
            break;
        }
        default: {
            UNI_ERROR_LOG("not support transform %s format to NCHW format\n", DataFormatName()[idf]);
        }
    }
}
// Convert any supported layout to NCHW: validate pointers, then dispatch
// to the typed kernel matching the tensor's element type.
inline EE transformToNCHW(
    TensorDesc inputDesc, const void *input, TensorDesc outputDesc, void *output)
{
    if (nullptr == input || nullptr == output) {
        return NULL_POINTER;
    }
#ifdef _USE_FP32
    if (inputDesc.dt == DT_F32) {
        transformToNCHWKernel<F32>(inputDesc, (F32 *)input, outputDesc, (F32 *)output);
        return SUCCESS;
    }
#endif
#ifdef _USE_FP16
    if (inputDesc.dt == DT_F16) {
        transformToNCHWKernel<F16>(inputDesc, (F16 *)input, outputDesc, (F16 *)output);
        return SUCCESS;
    }
#endif
#ifdef _USE_INT8
    if (inputDesc.dt == DT_I8) {
        transformToNCHWKernel<INT8>(inputDesc, (INT8 *)input, outputDesc, (INT8 *)output);
        return SUCCESS;
    }
    if (inputDesc.dt == DT_U8_Q) {
        transformToNCHWKernel<UINT8>(inputDesc, (UINT8 *)input, outputDesc, (UINT8 *)output);
        return SUCCESS;
    }
#endif
    return NOT_SUPPORTED;
}
// Typed kernel: repack 'input' into channel-last NHWC order in 'output'.
// Accepts 2d/3d/4d input descriptors and the source layouts NHWC, NCHW
// and NCHWC8; input and output must hold the same number of elements.
template <typename T>
inline static void transformToNHWCKernel(
    TensorDesc inputDesc, const T *input, TensorDesc outputDesc, T *output)
{
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    // Normalize the input descriptor to an (n, c, h, w) quadruple.
    if (tensorIs2d(inputDesc)) {
        CHECK_STATUS(tensor2dGet(inputDesc, &idt, &idf, &in, &iw));
        ic = 1;
        ih = 1;
    } else if (tensorIs3d(inputDesc)) {
        CHECK_STATUS(tensor3dGet(inputDesc, &idt, &idf, &in, &ih, &iw));
        ic = 1;
    } else if (tensorIs4d(inputDesc)) {
        CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    } else {
        UNI_ERROR_LOG("not support transform %d-dim tensor to NHWC format\n", (int)inputDesc.nDims);
        return;
    }
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    U32 size = tensorNumElements(outputDesc);
    U32 ihiw = ih * iw;
    switch (idf) {
        case DF_NHWC: {
            // Already channel-last: plain copy.
            CHECK_REQUIREMENT(tensorNumElements(inputDesc) == size);
            if (input != output) {
                memcpy(output, input, tensorNumBytes(inputDesc));
            }
            break;
        }
        case DF_NCHW: {
            // Channel-first -> channel-last element shuffle.
            CHECK_REQUIREMENT(tensorNumElements(inputDesc) == size);
            for (U32 o = 0, srcIndex = 0; o < in; o++) {
                for (U32 cc = 0; cc < ic; cc++) {
                    for (U32 hw = 0; hw < ihiw; hw++, srcIndex++) {
                        U32 dstIndex = (o * ihiw + hw) * ic + cc;
                        output[dstIndex] = input[srcIndex];
                    }
                }
            }
            break;
        }
        case DF_NCHWC8: {
            // 8-channel interleaved tiles -> channel-last; destination keeps
            // the (group, lane) channel split: channel = c * 8 + c8.
            CHECK_REQUIREMENT(ic % 8 == 0);
            ic /= 8;
            for (U32 n = 0, srcIndex = 0; n < in; n++) {
                for (U32 c = 0; c < ic; c++) {
                    for (U32 hw = 0; hw < ihiw; hw++) {
                        for (U32 c8 = 0; c8 < 8; c8++, srcIndex++) {
                            U32 dstIndex = ((n * ihiw + hw) * ic + c) * 8 + c8;
                            output[dstIndex] = input[srcIndex];
                        }
                    }
                }
            }
            break;
        }
        default: {
            UNI_ERROR_LOG(
                "not support transform %s format tensor to NHWC format\n", DataFormatName()[idf]);
        }
    }
}
// Convert any supported layout to NHWC: validate pointers, then dispatch
// to the typed kernel matching the tensor's element type.
inline EE transformToNHWC(
    TensorDesc inputDesc, const void *input, TensorDesc outputDesc, void *output)
{
    if (nullptr == input || nullptr == output) {
        return NULL_POINTER;
    }
#ifdef _USE_FP32
    if (inputDesc.dt == DT_F32) {
        transformToNHWCKernel<F32>(inputDesc, (F32 *)input, outputDesc, (F32 *)output);
        return SUCCESS;
    }
#endif
#ifdef _USE_FP16
    if (inputDesc.dt == DT_F16) {
        transformToNHWCKernel<F16>(inputDesc, (F16 *)input, outputDesc, (F16 *)output);
        return SUCCESS;
    }
#endif
#ifdef _USE_INT8
    if (inputDesc.dt == DT_I8) {
        transformToNHWCKernel<INT8>(inputDesc, (INT8 *)input, outputDesc, (INT8 *)output);
        return SUCCESS;
    }
#endif
    return NOT_SUPPORTED;
}
// Split NCHWC16 channel tiles into NCHWC8 tiles.  2d tensors are copied
// verbatim; 3d tensors are treated as (n, c, h) with w == 1.
inline EE transformNCHWC16ToNCHWC8(
    TensorDesc inputDesc, const void *input, TensorDesc outputDesc, void *output)
{
    if (input == NULL || output == NULL) {
        CHECK_STATUS(NULL_POINTER);
    }
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    if (tensorIs2d(inputDesc)) {
        if (input != output) {
            memcpy(output, input, tensorNumBytes(inputDesc));
        }
        return SUCCESS;
    } else if (tensorIs3d(inputDesc)) {
        CHECK_STATUS(tensor3dGet(inputDesc, &idt, &idf, &in, &ic, &ih));
        CHECK_STATUS(tensor3dGet(outputDesc, &odt, &odf, &on, &oc, &oh));
        iw = ow = 1;
    } else {
        CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
        CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    }
    CHECK_REQUIREMENT(in == on && idf == DF_NCHWC16 && odf == DF_NCHWC8 && idt == odt);
    int elementSize = bytesOf(idt);
    const U8 *inputPtr = (const U8 *)input;
    U8 *outputPtr = (U8 *)output;
    oc /= 8;    // oc now counts 8-channel groups
    for (U32 n = 0; n < on && n < in; n++) {
        // Each 16-wide source tile feeds two consecutive 8-wide dest tiles.
        for (U32 c = 0; c < oc; c += 2) {
            for (U32 h = 0; h < oh && h < ih; h++) {
                for (U32 w = 0; w < ow && w < iw; w++) {
                    for (U32 c8 = 0; c8 < 2 && c8 + c < oc; ++c8) {
                        U32 srcIndex =
                            n * ic * ih * iw + c * ih * iw * 8 + (h * iw + w) * 16 + c8 * 8;
                        U32 dstIndex = n * ic * ih * iw + (c + c8) * ih * iw * 8 + (h * iw + w) * 8;
                        // Move one 8-element channel lane at a time.
                        memcpy(outputPtr + dstIndex * elementSize,
                            inputPtr + srcIndex * elementSize, elementSize * 8);
                    }
                }
            }
        }
    }
    return SUCCESS;
}
// Pack planar NCHW (or 3d/MTK) data into NCHWC8; channels missing from the
// input are zero-padded.  2d tensors are copied verbatim.
inline EE transformNCHWToNCHWC8(
    TensorDesc inputDesc, const void *input, TensorDesc outputDesc, void *output)
{
    if (input == NULL || output == NULL) {
        CHECK_STATUS(NULL_POINTER);
    }
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    if (tensorIs2d(inputDesc)) {
        if (input != output) {
            memcpy(output, input, tensorNumBytes(inputDesc));
        }
        return SUCCESS;
    } else if (tensorIs3d(inputDesc)) {
        CHECK_STATUS(tensor3dGet(inputDesc, &idt, &idf, &in, &ic, &ih));
        CHECK_STATUS(tensor3dGet(outputDesc, &odt, &odf, &on, &oc, &oh));
        iw = ow = 1;
    } else {
        CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
        CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    }
    CHECK_REQUIREMENT(in == on && idf != DF_NCHWC8 && odf == DF_NCHWC8 && idt == odt);
    int elementSize = bytesOf(idt);
    const U8 *inputPtr = (const U8 *)input;
    U8 *outputPtr = (U8 *)output;
    oc /= 8;    // oc now counts 8-channel groups
    for (U32 n = 0; n < on && n < in; n++) {
        for (U32 c = 0; c < oc; c++) {
            for (U32 h = 0; h < oh && h < ih; h++) {
                for (U32 w = 0; w < ow && w < iw; w++) {
                    // c_i is the source (planar) channel of lane c8.
                    for (U32 c8 = 0, c_i = c * 8; c8 < 8; c8++, c_i++) {
                        U32 dstIndex = ((((n * oc + c) * oh + h) * ow + w) * 8 + c8) * elementSize;
                        // support channel padding
                        if (c_i < ic) {
                            U32 srcIndex = (((n * ic + c_i) * ih + h) * iw + w) * elementSize;
                            memcpy(outputPtr + dstIndex, inputPtr + srcIndex, elementSize);
                        } else {
                            memset(outputPtr + dstIndex, 0, elementSize);
                        }
                    }
                }
            }
        }
    }
    return SUCCESS;
}
// Pack channel-last NHWC data into NCHWC8; channels missing from the input
// are zero-padded.  Only 4d tensors are accepted.
inline EE transformNHWCToNCHWC8(
    TensorDesc inputDesc, const void *input, TensorDesc outputDesc, void *output)
{
    if (input == NULL || output == NULL) {
        CHECK_STATUS(NULL_POINTER);
    }
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    CHECK_REQUIREMENT(in == on && idf == DF_NHWC && odf == DF_NCHWC8 && idt == odt);
    int elementSize = bytesOf(idt);
    const U8 *inputPtr = (const U8 *)input;
    U8 *outputPtr = (U8 *)output;
    oc /= 8;    // oc now counts 8-channel groups
    for (U32 n = 0; n < on && n < in; n++) {
        for (U32 c = 0; c < oc; c++) {
            for (U32 h = 0; h < oh && h < ih; h++) {
                for (U32 w = 0; w < ow && w < iw; w++) {
                    // c_i is the source (NHWC) channel of lane c8.
                    for (U32 c8 = 0, c_i = c * 8; c8 < 8; c8++, c_i++) {
                        U32 dstIndex = ((((n * oc + c) * oh + h) * ow + w) * 8 + c8) * elementSize;
                        // support channel padding
                        if (c_i < ic) {
                            U32 srcIndex = (((n * ih + h) * iw + w) * ic + c_i) * elementSize;
                            memcpy(outputPtr + dstIndex, inputPtr + srcIndex, elementSize);
                        } else {
                            memset(outputPtr + dstIndex, 0, elementSize);
                        }
                    }
                }
            }
        }
    }
    return SUCCESS;
}
// Regroup an NCHWC8 tensor when the per-group channel count changes (e.g.
// grouped convolution with per-group channel padding).  group <= 1 or
// equal element counts degenerate to a plain copy.
inline EE transformNCHWC8ToNCHWC8ByGroup(
    TensorDesc inputDesc, const void *input, int group, TensorDesc outputDesc, void *output)
{
    U32 inputSize = tensorNumElements(inputDesc);
    U32 outputSize = tensorNumElements(outputDesc);
    if (group <= 1 || inputSize == outputSize) {
        if (input != output) {
            // NOTE(review): outputSize is an element count, yet it is used
            // as the memcpy byte count; sibling paths use tensorNumBytes.
            // Possibly copies too few bytes for multi-byte types — confirm.
            memcpy(output, input, outputSize);
        }
        return SUCCESS;
    }
    U32 channelAlignSize = 8;
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw;
    U32 on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    CHECK_REQUIREMENT(idt == odt);
    CHECK_REQUIREMENT(idf == DF_NCHWC8 && odf == DF_NCHWC8);
    U32 icg = ic / group;   // input channels per group
    U32 ocg = oc / group;   // output channels per group
    U32 ict = ic / channelAlignSize;    // input C8 tile count
    U32 oct = oc / channelAlignSize;    // output C8 tile count
    U32 elementSize = bytesOf(idt);
    for (U32 n = 0; n < in; n++) {
        // od walks the flat output channel index; id is the matching input
        // channel; each is split into a tile index (_a) and lane (_b).
        for (I32 g = 0, od = 0; g < group; g++) {
            for (U32 c = 0; c < ocg; c++, od++) {
                U32 id = g * icg + c;
                U32 id_a = id / channelAlignSize;
                U32 od_a = od / channelAlignSize;
                U32 id_b = id % channelAlignSize;
                U32 od_b = od % channelAlignSize;
                for (U32 h = 0; h < oh; h++) {
                    for (U32 w = 0; w < ow; w++) {
                        U32 dstIndex =
                            ((((n * oct + od_a) * oh + h) * ow + w) * channelAlignSize + od_b) *
                            elementSize;
                        if (h < ih && w < iw) {
                            U32 srcIndex =
                                ((((n * ict + id_a) * ih + h) * iw + w) * channelAlignSize + id_b) *
                                elementSize;
                            memcpy(
                                (U8 *)output + dstIndex, (const U8 *)input + srcIndex, elementSize);
                        } else {
                            // Spatial positions past the input extent are zeroed.
                            memset((U8 *)output + dstIndex, 0, elementSize);
                        }
                    }
                }
            }
        }
    }
    return SUCCESS;
}
// Typed kernel: repack 'input' into NCHWC16 tile order in 'output'.
// Accepts 2d/3d/4d input descriptors and the source layouts NCHW/MTK and
// NCHWC8.  Channel counts not divisible by 16 leave a narrower tail tile.
template <typename T>
inline static void transformToNCHWC16Kernel(
    TensorDesc inputDesc, const T *input, TensorDesc outputDesc, T *output)
{
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    // Normalize the input descriptor to an (n, c, h, w) quadruple.
    if (tensorIs2d(inputDesc)) {
        CHECK_STATUS(tensor2dGet(inputDesc, &idt, &idf, &in, &iw));
        ic = 1;
        ih = 1;
    } else if (tensorIs3d(inputDesc)) {
        if (inputDesc.df == DF_NHWC) {
            CHECK_STATUS(tensor3dGet(inputDesc, &idt, &idf, &in, &ih, &iw));
            ic = 1;
        } else {
            CHECK_STATUS(tensor3dGet(inputDesc, &idt, &idf, &in, &ic, &ih));
            iw = 1;
        }
    } else if (tensorIs4d(inputDesc)) {
        CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    } else {
        UNI_ERROR_LOG(
            "not support transform %d-dim tensor to NCHWC16 format\n", (int)inputDesc.nDims);
        return;
    }
    // Normalize the output descriptor likewise.
    if (tensorIs3d(outputDesc)) {
        CHECK_STATUS(tensor3dGet(outputDesc, &odt, &odf, &on, &oc, &oh));
        ow = 1;
    } else if (tensorIs4d(outputDesc)) {
        CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    } else {
        UNI_ERROR_LOG("not support transform to %d-dim NCHWC16 tensor\n", (int)outputDesc.nDims);
        return;
    }
    CHECK_REQUIREMENT(idt == odt);
    switch (idf) {
        case DF_MTK:
        case DF_NCHW: {
            // Planar channels -> 16-wide tiles, then a narrower tail tile.
            U32 ic16 = ic / 16;
            for (U32 n = 0; n < in; ++n) {
                U32 c = 0;
                for (; c < ic16; ++c) {
                    for (U32 h = 0; h < ih; ++h) {
                        for (U32 w = 0; w < iw; ++w) {
                            for (U32 cc = 0; cc < 16; ++cc) {
                                output[n * ic * ih * iw + c * 16 * ih * iw + (h * iw + w) * 16 + cc] =
                                    input[n * ic * ih * iw + (c * 16 + cc) * ih * iw + h * iw + w];
                            }
                        }
                    }
                }
                c *= 16;
                while (c < ic) {
                    // A 12-channel remainder is stored as an 8-wide tile.
                    U32 cx = ic - c;
                    cx = (cx == 12) ? 8 : cx;
                    for (U32 h = 0; h < ih; ++h) {
                        for (U32 w = 0; w < iw; ++w) {
                            for (U32 cc = 0; cc < cx; ++cc) {
                                output[n * ic * ih * iw + c * ih * iw + (h * iw + w) * cx + cc] =
                                    input[n * ic * ih * iw + (c + cc) * ih * iw + h * iw + w];
                            }
                        }
                    }
                    c += cx;
                }
            }
            break;
        }
        case DF_NCHWC8: {
            // Merge pairs of 8-wide tiles into 16-wide tiles, tail as above.
            U32 ic16 = ic / 16;
            for (U32 n = 0; n < in; ++n) {
                U32 c = 0;
                for (; c < ic16; ++c) {
                    for (U32 h = 0; h < ih; ++h) {
                        for (U32 w = 0; w < iw; ++w) {
                            for (U32 cc = 0; cc < 16; cc += 8) {
                                for (U32 c8 = 0; c8 < 8; ++c8) {
                                    output[n * ic * ih * iw + c * 16 * ih * iw + (h * iw + w) * 16 +
                                        cc + c8] = input[n * ic * ih * iw +
                                        (c * 16 + cc) * ih * iw + (h * iw + w) * 8 + c8];
                                }
                            }
                        }
                    }
                }
                c *= 16;
                while (c < ic) {
                    U32 cx = ic - c;
                    cx = (cx == 12) ? 8 : cx;
                    for (U32 h = 0; h < ih; ++h) {
                        for (U32 w = 0; w < iw; ++w) {
                            for (U32 cc = 0; cc < cx; cc += 8) {
                                for (U32 c8 = 0; c8 < 8; ++c8) {
                                    output[n * ic * ih * iw + c * ih * iw + (h * iw + w) * 8 + cc + c8] =
                                        input[n * ic * ih * iw + (c + cc) * ih * iw +
                                            (h * iw + w) * 8 + c8];
                                }
                            }
                        }
                    }
                    c += cx;
                }
            }
            break;
        }
        default: {
            UNI_ERROR_LOG(
                "not support transform %s format to NCHWC16 format\n", DataFormatName()[idf]);
        }
    }
}
// Convert any supported layout to NCHWC16: validate pointers, then
// dispatch to the typed kernel matching the tensor's element type.
inline EE transformToNCHWC16(
    TensorDesc inputDesc, const void *input, TensorDesc outputDesc, void *output)
{
    if (nullptr == input || nullptr == output) {
        return NULL_POINTER;
    }
#ifdef _USE_FP32
    if (inputDesc.dt == DT_F32) {
        transformToNCHWC16Kernel<F32>(inputDesc, (F32 *)input, outputDesc, (F32 *)output);
        return SUCCESS;
    }
#endif
#ifdef _USE_INT8
    if (inputDesc.dt == DT_U8_Q) {
        transformToNCHWC16Kernel<UINT8>(inputDesc, (UINT8 *)input, outputDesc, (UINT8 *)output);
        return SUCCESS;
    }
#endif
    return NOT_SUPPORTED;
}
// Top-level layout router: chooses a converter from the destination
// format and, for NCHWC8 targets, from the source format as well.
inline EE transformFormat(
    TensorDesc inputDesc, const void *input, TensorDesc outputDesc, void *output)
{
    EE ret = NOT_SUPPORTED;
    switch (outputDesc.df) {
        case DF_NCHW: {
            ret = transformToNCHW(inputDesc, input, outputDesc, output);
            break;
        }
        case DF_NCHWC8: {
            switch (inputDesc.df) {
                case DF_NORMAL: {
                    // Flat data: bytes are already in the expected order.
                    memcpy(output, input, tensorNumBytes(inputDesc));
                    ret = SUCCESS;
                    break;
                }
                case DF_NCHW:
                case DF_MTK: {
                    ret = transformNCHWToNCHWC8(inputDesc, input, outputDesc, output);
                    break;
                }
                case DF_NHWC: {
                    ret = transformNHWCToNCHWC8(inputDesc, input, outputDesc, output);
                    break;
                }
                case DF_NCHWC8: {
                    ret = transformNCHWC8ToNCHWC8ByGroup(inputDesc, input, 1, outputDesc, output);
                    break;
                }
                case DF_NCHWC16: {
                    ret = transformNCHWC16ToNCHWC8(inputDesc, input, outputDesc, output);
                    break;
                }
                default: {
                    UNI_ERROR_LOG("layout transpose can not support transform from %s format "
                                  "to NCHWC8 format.\n",
                        DataFormatName()[inputDesc.df]);
                    break;
                }
            }
            break;
        }
        case DF_NCHWC16: {
            ret = transformToNCHWC16(inputDesc, input, outputDesc, output);
            break;
        }
        default: {
            UNI_ERROR_LOG("layout transpose can not support transform to %s format.\n",
                DataFormatName()[outputDesc.df]);
            break;
        }
    }
    return ret;
}
// Reverse the spatial (h, w) order of a convolution filter stored in
// NHWCN8 layout; each (ic x 8)-element tile is moved as one unit.
inline EE transposeFilter(
    TensorDesc inputDesc, const void *input, TensorDesc outputDesc, void *output)
{
    if (input == NULL || output == NULL) {
        CHECK_STATUS(NULL_POINTER);
    }
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    CHECK_REQUIREMENT(idf == odf);
    const U8 *src = (const U8 *)input;
    U8 *dst = (U8 *)output;
    switch (idf) {
        case DF_NHWCN8: {
            CHECK_REQUIREMENT(in % 8 == 0);
            in /= 8;
            U32 spatial = ih * iw;
            U32 tileBytes = bytesOf(idt) * ic * 8;
            for (U32 n8 = 0; n8 < in; n8++) {
                U32 base = n8 * spatial * tileBytes;
                for (U32 hw = 0; hw < spatial; hw++) {
                    // Tile hw goes to the mirrored position spatial-1-hw.
                    memcpy(dst + base + (spatial - 1 - hw) * tileBytes,
                        src + base + hw * tileBytes, tileBytes);
                }
            }
            break;
        }
        default: {
            CHECK_STATUS(NOT_SUPPORTED);
        }
    }
    return SUCCESS;
}
#endif
|
GB_sparse_masker_template.c | //------------------------------------------------------------------------------
// GB_sparse_masker_template: R = masker (C, M, Z) where R is sparse/hyper
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Computes C<M>=Z or C<!M>=Z, returning the result in R, which is sparse or
// hypersparse. The input matrix C is not modified. Effectively, this
// computes R=C and then R<M>=Z or R<!M>=Z. If the C_replace descriptor is
// enabled, then C has already been cleared, and is an empty (but non-NULL)
// matrix.
// phase1: does not compute R itself, but just counts the # of entries in each
// vector of R. Fine tasks compute the # of entries in their slice of a
// single vector of R, and the results are cumsum'd.
// phase2: computes R, using the counts computed by phase1.
// C is sparse or hypersparse. M and Z can have any sparsity structure.
// ------------------------------------------
// C <!M> = Z R
// ------------------------------------------
// sparse sparse sparse sparse
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <M> = Z R
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse bitmap sparse
// sparse sparse full sparse
// sparse bitmap sparse sparse
// sparse full sparse sparse
// FUTURE:: add special cases for C==Z, C==M, and Z==M aliases
//------------------------------------------------------------------------------
// R(i,j) = Z(i,j) when Z is sparse or hypersparse
//------------------------------------------------------------------------------
// This template is compiled twice: phase1 (GB_PHASE_1_OF_2) only counts the
// entries that would be written; phase2 writes the row index into Ri and
// copies the rsize-byte value into Rx.  The macros below expand differently
// per phase and rely on locals (rjnz, pR, pC, pZ, i, ...) of the including
// loop body.
#if defined ( GB_PHASE_1_OF_2 )
// phase1: count one entry copied from Z
#define GB_COPY_Z \
{ \
rjnz++ ; \
}
#else
// phase2: append Z(i,j) to R(:,j)
#define GB_COPY_Z \
{ \
Ri [pR] = i ; \
memcpy (Rx +(pR)*rsize, Zx +(pZ)*rsize, rsize) ; \
pR++ ; \
}
#endif
//------------------------------------------------------------------------------
// R(i,j) = Z(i,j) when Z is bitmap or full
//------------------------------------------------------------------------------
// For bitmap/full Z the position of Z(i,j) is computed directly from i
// (pZ_start + i - iZ_first); GBB checks the bitmap so a non-present bitmap
// entry contributes nothing.
#if defined ( GB_PHASE_1_OF_2 )
#define GB_COPY_Z_BITMAP_OR_FULL \
{ \
rjnz += GBB (Zb, pZ_start + i - iZ_first) ; \
}
#else
#define GB_COPY_Z_BITMAP_OR_FULL \
{ \
int64_t pZ = pZ_start + i - iZ_first ; \
if (GBB (Zb, pZ)) \
{ \
Ri [pR] = i ; \
memcpy (Rx +(pR)*rsize, Zx +(pZ)*rsize, rsize) ; \
pR++ ; \
} \
}
#endif
//------------------------------------------------------------------------------
// R(i,j) = C(i,j)
//------------------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
// phase1: count one entry copied from C
#define GB_COPY_C \
{ \
rjnz++ ; \
}
#else
// phase2: append C(i,j) to R(:,j)
#define GB_COPY_C \
{ \
Ri [pR] = i ; \
memcpy (Rx +(pR)*rsize, Cx +(pC)*rsize, rsize) ; \
pR++ ; \
}
#endif
//------------------------------------------------------------------------------
// template for R = masker (C, M, Z) when R is sparse or hypersparse
//------------------------------------------------------------------------------
{

    //--------------------------------------------------------------------------
    // phase1: count entries in each C(:,j)
    // phase2: compute C
    //--------------------------------------------------------------------------

    // NOTE(review): this template is expanded by an including file, which is
    // expected to declare taskid, TaskList, R_ntasks, R_nthreads, vlen, rsize,
    // msize, Mask_comp, the matrix arrays (Rp, Rh, Ri, Rx, Cp, Ci, Cx, Mp, Mi,
    // Mx, Mb, Zp, Zi, Zx, Zb) and the *_is_sparse/_hyper/_bitmap/_full flags.
    // It is compiled twice: with GB_PHASE_1_OF_2 (count entries per vector,
    // results cumsum'd by the caller) and with GB_PHASE_2_OF_2 (fill Ri/Rx).

    ASSERT (C_is_sparse || C_is_hyper) ;

    #pragma omp parallel for num_threads(R_nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < R_ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast = TaskList [taskid].klast ;
        bool fine_task = (klast == -1) ;
        int64_t len ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
            len = TaskList [taskid].len ;
        }
        else
        {
            // a coarse task operates on one or more whole vectors
            len = vlen ;
        }

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of R
            //------------------------------------------------------------------

            int64_t j = GBH (Rh, k) ;

            #if defined ( GB_PHASE_1_OF_2 )
            int64_t rjnz = 0 ;
            #else
            int64_t pR, pR_end ;
            if (fine_task)
            {
                // A fine task computes a slice of R(:,j)
                pR = TaskList [taskid ].pC ;
                pR_end = TaskList [taskid+1].pC ;
                ASSERT (Rp [k] <= pR && pR <= pR_end && pR_end <= Rp [k+1]) ;
            }
            else
            {
                // The vectors of R are never sliced for a coarse task.
                pR = Rp [k] ;
                pR_end = Rp [k+1] ;
            }
            int64_t rjnz = pR_end - pR ;
            if (rjnz == 0)
            {
                continue ;
            }
            #endif

            //------------------------------------------------------------------
            // get C(:,j)
            //------------------------------------------------------------------

            int64_t pC = -1, pC_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Ci,Cx [pC...pC_end-1], which is
                // a subset of the vector C(:,j)
                pC = TaskList [taskid].pA ;
                pC_end = TaskList [taskid].pA_end ;
            }
            else
            {
                // A coarse task operates on the entire vector C(:,j)
                int64_t kC = (R_to_C == NULL) ? j : R_to_C [k] ;
                if (kC >= 0)
                {
                    pC = Cp [kC] ;
                    pC_end = Cp [kC+1] ;
                }
            }

            int64_t cjnz = pC_end - pC ; // nnz in C(:,j) for this slice
            bool cdense = (cjnz == len) && (cjnz > 0) ;

            #if defined ( GB_PHASE_2_OF_2 ) || defined ( GB_DEBUG )
            // get the first index in C(:,j) for this vector
            int64_t iC_first = -1 ;
            if (cjnz > 0) iC_first = Ci [pC] ;
            #endif
            #ifdef GB_DEBUG
            int64_t iC_last = -1 ;
            if (cjnz > 0) iC_last = Ci [pC_end-1] ;
            #endif

            //------------------------------------------------------------------
            // get Z(:,j)
            //------------------------------------------------------------------

            int64_t pZ = -1, pZ_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Zi,Zx [pZ...pZ_end-1], which is
                // a subset of the vector Z(:,j)
                pZ = TaskList [taskid].pB ;
                pZ_end = TaskList [taskid].pB_end ;
            }
            else
            {
                // A coarse task operates on the entire vector Z(:,j)
                int64_t kZ = (R_to_Z == NULL) ? j : R_to_Z [k] ;
                if (kZ >= 0)
                {
                    pZ = GBP (Zp, kZ, vlen) ;
                    pZ_end = GBP (Zp, kZ+1, vlen) ;
                }
            }

            int64_t zjnz = pZ_end - pZ ; // nnz in Z(:,j) for this slice
            int64_t pZ_start = pZ ;
            bool zdense = (zjnz == len) && (zjnz > 0) ;

            int64_t iZ_first = -1, iZ_last = -1 ;
            if (zjnz > 0)
            {
                iZ_first = GBI (Zi, pZ, vlen) ;
                iZ_last = GBI (Zi, pZ_end-1, vlen) ;
            }

            //------------------------------------------------------------------
            // get M(:,j)
            //------------------------------------------------------------------

            int64_t pM = -1, pM_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Mi,Mx [pM...pM_end-1], which is
                // a subset of the vector M(:,j)
                pM = TaskList [taskid].pM ;
                pM_end = TaskList [taskid].pM_end ;
            }
            else
            {
                // A coarse task operates on the entire vector M (:,j)
                int64_t kM = (R_to_M == NULL) ? j : R_to_M [k] ;
                if (kM >= 0)
                {
                    pM = GBP (Mp, kM, vlen) ;
                    pM_end = GBP (Mp, kM+1, vlen) ;
                }
            }

            int64_t mjnz = pM_end - pM ; // nnz (M (:,j))
            bool mdense = (mjnz == len) && (mjnz > 0) ;

            // get the first index in M(:,j) for this vector
            int64_t iM_first = -1 ;
            int64_t pM_first = pM ;
            if (mjnz > 0) iM_first = GBI (Mi, pM_first, vlen) ;

            //------------------------------------------------------------------
            // R(:,j) = masker (C (:,j), M (:,j), Z (:,j))
            //------------------------------------------------------------------

            if (Z_is_bitmap || Z_is_full)
            {

                //--------------------------------------------------------------
                // Method01: Z is bitmap or full; M is sparse or hypersparse
                //--------------------------------------------------------------

                // ------------------------------------------
                // C <M> = Z R
                // ------------------------------------------
                // sparse sparse bitmap sparse
                // sparse sparse full sparse

                // M is sparse or hypersparse, and not complemented.
                // Otherwise, R is bitmap and not computed here, but in
                // GB_bitmap_masker_template instead.
                ASSERT (M_is_sparse || M_is_hyper) ;
                ASSERT (!Mask_comp) ;

                // 2-way merge of C(:,j) and M(:,j) and direct lookup of Z
                while (pC < pC_end && pM < pM_end)
                {
                    int64_t iC = Ci [pC] ;
                    int64_t iM = Mi [pM] ;
                    if (iC < iM)
                    {
                        // C(i,j) is present but M(i,j) is not
                        // R(i,j) = C(i,j)
                        int64_t i = iC ;
                        GB_COPY_C ;
                        pC++ ;
                    }
                    else if (iC > iM)
                    {
                        // M(i,j) is present but C(i,j) is not
                        int64_t i = iM ;
                        bool mij = GB_mcast (Mx, pM, msize) ;
                        if (mij)
                        {
                            // R(i,j) = Z(i,j)
                            GB_COPY_Z_BITMAP_OR_FULL ;
                        }
                        pM++ ;
                    }
                    else
                    {
                        // both C(i,j) and M(i,j) are present
                        int64_t i = iM ;
                        bool mij = GB_mcast (Mx, pM, msize) ;
                        if (mij)
                        {
                            // R(i,j) = Z(i,j)
                            GB_COPY_Z_BITMAP_OR_FULL ;
                        }
                        else
                        {
                            // R(i,j) = C(i,j)
                            GB_COPY_C ;
                        }
                        pC++ ;
                        pM++ ;
                    }
                }

                // if M(:,j) is exhausted ; continue scanning all of C(:,j)
                #if defined ( GB_PHASE_1_OF_2 )
                rjnz += (pC_end - pC) ;
                #else
                for ( ; pC < pC_end ; pC++)
                {
                    // C(i,j) is present but M(i,j) is not
                    int64_t i = Ci [pC] ;
                    GB_COPY_C ;
                }
                #endif

                // if C(:,j) is exhausted ; continue scanning all of M(:,j)
                for ( ; pM < pM_end ; pM++)
                {
                    // M(i,j) is present but C(i,j) is not
                    int64_t i = Mi [pM] ;
                    bool mij = GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        // R(i,j) = Z(i,j)
                        GB_COPY_Z_BITMAP_OR_FULL ;
                    }
                }
            }
            else if (mjnz == 0)
            {

                //--------------------------------------------------------------
                // Z is sparse or hypersparse, M(:,j) is empty
                //--------------------------------------------------------------

                // ------------------------------------------
                // C <!M> = Z R
                // ------------------------------------------
                // sparse sparse sparse sparse
                // ------------------------------------------
                // C <M> = Z R
                // ------------------------------------------
                // sparse sparse sparse sparse

                // Z must be sparse or hypersparse
                ASSERT (Z_is_sparse || Z_is_hyper) ;

                if (!Mask_comp)
                {

                    //----------------------------------------------------------
                    // Method02: M(:,j) is empty and not complemented
                    //----------------------------------------------------------

                    // R(:,j) = C(:,j), regardless of Z(:,j)
                    #if defined ( GB_PHASE_1_OF_2 )
                    rjnz = cjnz ;
                    #else
                    ASSERT (rjnz == cjnz) ;
                    memcpy (Ri +(pR), Ci +(pC), cjnz * sizeof (int64_t)) ;
                    memcpy (Rx +(pR)*rsize, Cx +(pC)*rsize, cjnz*rsize) ;
                    #endif
                }
                else
                {

                    //----------------------------------------------------------
                    // Method03: M(:,j) is empty and complemented
                    //----------------------------------------------------------

                    // R(:,j) = Z(:,j), regardless of C(:,j)
                    #if defined ( GB_PHASE_1_OF_2 )
                    rjnz = zjnz ;
                    #else
                    ASSERT (rjnz == zjnz) ;
                    memcpy (Ri +(pR), Zi +(pZ), zjnz * sizeof (int64_t)) ;
                    memcpy (Rx +(pR)*rsize, Zx +(pZ)*rsize, zjnz*rsize) ;
                    #endif
                }
            }
            else if (cdense && zdense)
            {

                //--------------------------------------------------------------
                // Method03: C(:,j) and Z(:,j) dense: thus R(:,j) dense
                //--------------------------------------------------------------

                // NOTE(review): this "Method03" label duplicates the label of
                // the empty-and-complemented case above; likely a stale label.

                // ------------------------------------------
                // C <!M> = Z R
                // ------------------------------------------
                // sparse sparse sparse sparse
                // sparse bitmap sparse sparse
                // sparse full sparse sparse
                // ------------------------------------------
                // C <M> = Z R
                // ------------------------------------------
                // sparse sparse sparse sparse
                // sparse bitmap sparse sparse
                // sparse full sparse sparse

                // Both C(:,j) and Z(:,j) are dense (that is, all entries
                // present), but both C and Z are stored in a sparse or
                // hypersparse sparsity structure. M has any sparsity.

                ASSERT (Z_is_sparse || Z_is_hyper) ;
                ASSERT (cjnz == zjnz) ;
                ASSERT (iC_first == iZ_first) ;
                ASSERT (iC_last == iZ_last ) ;

                #if defined ( GB_PHASE_1_OF_2 )
                rjnz = cjnz ;
                #else
                ASSERT (rjnz == cjnz) ;
                for (int64_t p = 0 ; p < cjnz ; p++)
                {
                    // indices are contiguous, so row i is iC_first + p
                    int64_t i = p + iC_first ;
                    Ri [pR + p] = i ;
                    int64_t iM = (pM < pM_end) ? GBI (Mi, pM, vlen) : INT64_MAX;
                    bool mij = false ;
                    if (i == iM)
                    {
                        mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                        pM++ ;
                    }
                    if (Mask_comp) mij = !mij ;
                    if (mij)
                    {
                        // R(i,j) = Z (i,j)
                        memcpy (Rx +(pR+p)*rsize, Zx +(pZ+p)*rsize, rsize) ;
                    }
                    else
                    {
                        // R(i,j) = C (i,j)
                        memcpy (Rx +(pR+p)*rsize, Cx +(pC+p)*rsize, rsize) ;
                    }
                }
                #endif
            }
            else
            {

                //--------------------------------------------------------------
                // Method04: 2-way merge of C(:,j) and Z(:,j)
                //--------------------------------------------------------------

                // Z is sparse or hypersparse; M has any sparsity structure
                ASSERT (Z_is_sparse || Z_is_hyper) ;

                //--------------------------------------------------------------
                // Z is sparse or hypersparse, M has any sparsity
                //--------------------------------------------------------------

                // ------------------------------------------
                // C <!M> = Z R
                // ------------------------------------------
                // sparse sparse sparse sparse
                // sparse bitmap sparse sparse
                // sparse full sparse sparse
                // ------------------------------------------
                // C <M> = Z R
                // ------------------------------------------
                // sparse sparse sparse sparse
                // sparse bitmap sparse sparse
                // sparse full sparse sparse

                while (pC < pC_end && pZ < pZ_end)
                {

                    //----------------------------------------------------------
                    // get the next i for R(:,j)
                    //----------------------------------------------------------

                    int64_t iC = Ci [pC] ;
                    int64_t iZ = Zi [pZ] ;
                    int64_t i = GB_IMIN (iC, iZ) ;

                    //----------------------------------------------------------
                    // get M(i,j)
                    //----------------------------------------------------------

                    bool mij = false ;
                    if (mdense)
                    {

                        //------------------------------------------------------
                        // Method04a: M(:,j) is dense
                        //------------------------------------------------------

                        // mask is dense, lookup M(i,j)
                        // iM_first == Mi [pM_first]
                        // iM_first + delta == Mi [pM_first + delta]
                        // let i = iM_first + delta
                        // let pM = pM_first + delta
                        // then delta = i - iM_first
                        pM = pM_first + (i - iM_first) ;
                        ASSERT (i == GBI (Mi, pM, vlen)) ;
                        mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                        // increment pM for the wrapup phase below
                        pM++ ;
                    }
                    else
                    {

                        //------------------------------------------------------
                        // Method04b: M(:,j) is sparse
                        //------------------------------------------------------

                        // Use GB_SPLIT_BINARY_SEARCH so that pM can be used in
                        // the for loop with index pM in the wrapup phase.
                        ASSERT (M_is_sparse || M_is_hyper) ;
                        int64_t pright = pM_end - 1 ;
                        bool found ;
                        GB_SPLIT_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                        if (found)
                        {
                            ASSERT (i == Mi [pM]) ;
                            mij = GB_mcast (Mx, pM, msize) ;
                            // increment pM for the wrapup phase below
                            pM++ ;
                        }
                    }
                    if (Mask_comp) mij = !mij ;

                    //----------------------------------------------------------
                    // R(i,j) = C(i,j) or Z(i,j)
                    //----------------------------------------------------------

                    if (iC < iZ)
                    {
                        // C(i,j) is present but Z(i,j) is not
                        if (!mij) GB_COPY_C ;
                        pC++ ;
                    }
                    else if (iC > iZ)
                    {
                        // Z(i,j) is present but C(i,j) is not
                        if (mij) GB_COPY_Z ;
                        pZ++ ;
                    }
                    else
                    {
                        // both C(i,j) and Z(i,j) are present
                        int64_t i = iC ;
                        if (mij)
                        {
                            GB_COPY_Z ;
                        }
                        else
                        {
                            GB_COPY_C ;
                        }
                        pC++ ;
                        pZ++ ;
                    }
                }

                //--------------------------------------------------------------
                // Method04: wrapup: C or Z are exhausted, or initially empty
                //--------------------------------------------------------------

                cjnz = pC_end - pC ; // nnz (C(:,j)) remaining
                zjnz = pZ_end - pZ ; // nnz (Z(:,j)) remaining
                mjnz = pM_end - pM ; // nnz (M(:,j)) remaining

                if (cjnz == 0)
                {

                    //----------------------------------------------------------
                    // C(:,j) is empty
                    //----------------------------------------------------------

                    if (!Mask_comp)
                    {

                        //------------------------------------------------------
                        // mask is not complemented
                        //------------------------------------------------------

                        if (mdense)
                        {

                            //--------------------------------------------------
                            // Method04c: M(:,j) is dense
                            //--------------------------------------------------

                            for ( ; pZ < pZ_end ; pZ++)
                            {
                                int64_t i = Zi [pZ] ;
                                // mask is dense, lookup M(i,j)
                                pM = pM_first + (i - iM_first) ;
                                ASSERT (i == GBI (Mi, pM, vlen)) ;
                                bool mij = GBB (Mb, pM) &&
                                    GB_mcast (Mx, pM, msize) ;
                                if (mij) GB_COPY_Z ;
                            }
                        }
                        else if (zjnz > 32 * mjnz)
                        {

                            //--------------------------------------------------
                            // Method04d: Z(:,j) is much denser than M(:,j)
                            //--------------------------------------------------

                            // This loop requires pM to start at the first
                            // entry in M(:,j) that has not yet been handled.
                            ASSERT (M_is_sparse || M_is_hyper) ;
                            for ( ; pM < pM_end ; pM++)
                            {
                                if (GB_mcast (Mx, pM, msize))
                                {
                                    int64_t i = Mi [pM] ;
                                    int64_t pright = pZ_end - 1 ;
                                    bool found ;
                                    GB_BINARY_SEARCH (i, Zi, pZ, pright, found);
                                    if (found) GB_COPY_Z ;
                                }
                            }
                        }
                        else if (mjnz > 32 * zjnz)
                        {

                            //--------------------------------------------------
                            // Method04e: M(:,j) is much denser than Z(:,j)
                            //--------------------------------------------------

                            ASSERT (M_is_sparse || M_is_hyper) ;
                            for ( ; pZ < pZ_end ; pZ++)
                            {
                                int64_t i = Zi [pZ] ;
                                bool mij = false ;
                                int64_t pright = pM_end - 1 ;
                                bool found ;
                                GB_BINARY_SEARCH (i, Mi, pM, pright,found) ;
                                if (found) mij = GB_mcast (Mx, pM, msize) ;
                                if (mij) GB_COPY_Z ;
                            }
                        }
                        else
                        {

                            //--------------------------------------------------
                            // Method04f: M(:,j) and Z(:,j) about same # entries
                            //--------------------------------------------------

                            ASSERT (M_is_sparse || M_is_hyper) ;
                            while (pM < pM_end && pZ < pZ_end)
                            {
                                int64_t iM = Mi [pM] ;
                                int64_t i = Zi [pZ] ;
                                if (iM < i)
                                {
                                    // M(i,j) exists but not Z(i,j)
                                    pM++ ;
                                }
                                else if (i < iM)
                                {
                                    // Z(i,j) exists but not M(i,j)
                                    pZ++ ;
                                }
                                else
                                {
                                    // both M(i,j) and Z(i,j) exist
                                    if (GB_mcast (Mx, pM, msize)) GB_COPY_Z ;
                                    pM++ ;
                                    pZ++ ;
                                }
                            }
                        }
                    }
                    else
                    {

                        //------------------------------------------------------
                        // complemented mask, and C(:,j) empty
                        //------------------------------------------------------

                        if (mdense)
                        {

                            //--------------------------------------------------
                            // Method04g: M(:,j) is dense
                            //--------------------------------------------------

                            for ( ; pZ < pZ_end ; pZ++)
                            {
                                int64_t i = Zi [pZ] ;
                                // mask is dense, lookup M(i,j)
                                pM = pM_first + (i - iM_first) ;
                                ASSERT (i == GBI (Mi, pM, vlen)) ;
                                bool mij = GBB (Mb, pM) &&
                                    GB_mcast (Mx, pM, msize) ;
                                if (!mij) GB_COPY_Z ; // mask is complemented
                            }
                        }
                        else
                        {

                            //--------------------------------------------------
                            // Method04h: M(:,j) is sparse
                            //--------------------------------------------------

                            ASSERT (M_is_sparse || M_is_hyper) ;
                            for ( ; pZ < pZ_end ; pZ++)
                            {
                                int64_t i = Zi [pZ] ;
                                bool mij = false ;
                                int64_t pright = pM_end - 1 ;
                                bool found ;
                                GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                                if (found) mij = GB_mcast (Mx, pM, msize) ;
                                if (!mij) GB_COPY_Z ; // mask is complemented
                            }
                        }
                    }
                }
                else if (zjnz == 0)
                {

                    //----------------------------------------------------------
                    // Z(:,j) is empty
                    //----------------------------------------------------------

                    if (Mask_comp)
                    {

                        //------------------------------------------------------
                        // mask is complemented
                        //------------------------------------------------------

                        if (mdense)
                        {

                            //--------------------------------------------------
                            // Method04i: M(:,j) is dense
                            //--------------------------------------------------

                            for ( ; pC < pC_end ; pC++)
                            {
                                int64_t i = Ci [pC] ;
                                // mask is dense, lookup M(i,j)
                                pM = pM_first + (i - iM_first) ;
                                ASSERT (i == GBI (Mi, pM, vlen)) ;
                                bool mij = GBB (Mb, pM) &&
                                    GB_mcast (Mx, pM, msize) ;
                                if (mij) GB_COPY_C ;
                            }
                        }
                        else if (cjnz > 32 * mjnz)
                        {

                            //--------------------------------------------------
                            // Method04j: C(:,j) is much denser than M(:,j)
                            //--------------------------------------------------

                            ASSERT (M_is_sparse || M_is_hyper) ;
                            for ( ; pM < pM_end ; pM++)
                            {
                                if (GB_mcast (Mx, pM, msize))
                                {
                                    int64_t i = Mi [pM] ;
                                    int64_t pright = pC_end - 1 ;
                                    bool found ;
                                    GB_BINARY_SEARCH (i, Ci, pC, pright, found);
                                    if (found) GB_COPY_C ;
                                }
                            }
                        }
                        else if (mjnz > 32 * cjnz)
                        {

                            //--------------------------------------------------
                            // Method04k: M(:,j) is much denser than C(:,j)
                            //--------------------------------------------------

                            ASSERT (M_is_sparse || M_is_hyper) ;
                            for ( ; pC < pC_end ; pC++)
                            {
                                int64_t i = Ci [pC] ;
                                bool mij = false ;
                                int64_t pright = pM_end - 1 ;
                                bool found ;
                                GB_BINARY_SEARCH (i, Mi, pM, pright, found);
                                if (found) mij = GB_mcast (Mx, pM, msize) ;
                                if (mij) GB_COPY_C ;
                            }
                        }
                        else
                        {

                            //--------------------------------------------------
                            // Method04l: M(:,j) and C(:,j) about same # entries
                            //--------------------------------------------------

                            ASSERT (M_is_sparse || M_is_hyper) ;
                            while (pM < pM_end && pC < pC_end)
                            {
                                int64_t iM = Mi [pM] ;
                                int64_t i = Ci [pC] ;
                                if (iM < i)
                                {
                                    // M(i,j) exists but not C(i,j)
                                    pM++ ;
                                }
                                else if (i < iM)
                                {
                                    // C(i,j) exists but not M(i,j)
                                    pC++ ;
                                }
                                else
                                {
                                    // both M(i,j) and C(i,j) exist
                                    if (GB_mcast (Mx, pM, msize)) GB_COPY_C ;
                                    pM++ ;
                                    pC++ ;
                                }
                            }
                        }
                    }
                    else
                    {

                        //------------------------------------------------------
                        // non-complemented mask, and Z(:,j) empty
                        //------------------------------------------------------

                        if (mdense)
                        {

                            //--------------------------------------------------
                            // Method04m: M(:,j) is dense
                            //--------------------------------------------------

                            for ( ; pC < pC_end ; pC++)
                            {
                                int64_t i = Ci [pC] ;
                                // mask is dense, lookup M(i,j)
                                pM = pM_first + (i - iM_first) ;
                                ASSERT (i == GBI (Mi, pM, vlen)) ;
                                bool mij = GBB (Mb, pM) &&
                                    GB_mcast (Mx, pM, msize) ;
                                if (!mij) GB_COPY_C ;
                            }
                        }
                        else
                        {

                            //--------------------------------------------------
                            // Method04n: M(:,j) is sparse
                            //--------------------------------------------------

                            ASSERT (M_is_sparse || M_is_hyper) ;
                            for ( ; pC < pC_end ; pC++)
                            {
                                int64_t i = Ci [pC] ;
                                // M(i,j) false if not present
                                bool mij = false ;
                                int64_t pright = pM_end - 1 ;
                                bool found ;
                                GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                                if (found) mij = GB_mcast (Mx, pM, msize) ;
                                if (!mij) GB_COPY_C ;
                            }
                        }
                    }
                }

                // phase2 check: Method04 must have filled its slice of R(:,j)
                // exactly (pR reached pR_end).
                #if defined ( GB_PHASE_2_OF_2 )
                ASSERT (pR == pR_end) ;
                #endif
            }

            //------------------------------------------------------------------
            // final count of nnz (R(:,j))
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (fine_task)
            {
                TaskList [taskid].pC = rjnz ;
            }
            else
            {
                Rp [k] = rjnz ;
            }
            #endif
        }
    }
}
|
acoPlacement.c | /***************************************************************************
Name : acoPlacement.c
Version : 1.0
Author(s) : Panayiotis Danassis (panos_dan@hotmail.com)
Date : May 6, 2015
Description : 3-D FPGA placement algorithm based on Ant Colony Optimization.
-----------
Copyright (C) 2015 Panayiotis Danassis
School of ECE, National Technical University of Athens.
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include "acoPlacement.h"
#include "ants.h"
#include "inOut.h"
#include "timer.h"
int grid_size;           /* FPGA grid dimension; presumably set while parsing input -- TODO confirm in parse routines */
int numberOfLayers;      /* number of 3-D device layers; presumably set from input -- TODO confirm */
int numberOfThreads = 1; /* OpenMP thread count used in PARALLEL builds (see omp_set_num_threads in main) */
int iteration = 0; /* current iteration */
int foundBetter = FALSE; /* A better solution has been found since last call of print_results() */
double time_used;        /* seconds reported by elapsed_time() for the last timed phase */
/* Main routine for running the ACO algorithm */
int main(int argc, char *argv[]) {
  /* Entry point: (1) parse inputs, (2) initialize colony and matrices,
   * (3) run the ACO loop until termination_condition(), exporting the best
   * placement periodically and at the end. */
  int i;

  printf(BOLDWHITE"\nacoPlacement - A 3-D FPGA placement algorithm based on Ant Colony Optimization.\n");
  printf("Created by Panayiotis Danassis (panos_dan@hotmail.com) with the valuable contribution of Kostas Siozios (ksiop@microlab.ntua.gr)\n");
  printf("This software is licensed under the MIT License (see README.md).\n\n"RESET);

#ifdef DEBUG
  printf("------------ <main> ------------\n");
  printf("DEBUG MODE ON\n");
#endif
#ifdef PARALLEL
  printf("PARALLEL MODE ON\n");
#endif

  /* Phase 1: read all input files; each phase is timed and reported. */
  start_timers();
  parse_parameters(argc, argv); /* Set ACO parameters and I/O filenames */
  parse_netlist(); /* Read input netlist */
  parse_hypernets(); /* Read hypernets */
  parse_nets(); /* Read nets */
  parse_layers(); /* Read node's layer info */
  time_used = elapsed_time();
  printf("Reading Input Files [ OK ]\n");
  printf("Reading took %.10f seconds\n", time_used);

  /* Phase 2: initialize ants and the timing/heuristic/pheromone/total
   * matrices, timing each step separately. */
  start_timers();
  init_ants();
  printf("Initializing ants [ OK ]\n");
  init_timing_matrices();
  printf("Initializing timing matrices [ OK ]\n");
  time_used = elapsed_time();
  printf("Initialization took %.10f seconds\n", time_used);
  start_timers();
  init_heuristic_matrix();
  time_used = elapsed_time();
  printf("Initializing heuristic matrix [ OK ]\n");
  printf("Initialization took %.10f seconds\n", time_used);
  start_timers();
  init_pheromone_matrix();
  time_used = elapsed_time();
  printf("Initializing pheromone matrix [ OK ]\n");
  printf("Initialization took %.10f seconds\n", time_used);
  start_timers();
  init_total_matrix();
  time_used = elapsed_time();
  printf("Initializing total matrix [ OK ]\n");
  printf("Initialization took %.10f seconds\n", time_used);
  printf("\n");
  print_results();
  foundBetter = FALSE;

#ifdef PARALLEL
  omp_set_num_threads(numberOfThreads);
#endif

  /* Phase 3: main ACO loop -- each ant builds a placement, its quality is
   * evaluated, and pheromone trails are updated. */
  iteration = 1;
  while (!termination_condition()) {
    printf("Iteration = %d\n", iteration);
    //change_parameters_according_to_schedule();
    /* NOTE(review): foundBetter and best_so_far_ant are updated inside the
     * parallel loop (via compare_best_so_far_ant); confirm those updates are
     * synchronized (critical/atomic) in the callee to avoid a data race. */
#ifdef PARALLEL
#pragma omp parallel for private(i) shared(iteration, foundBetter, best_so_far_ant)
#endif
    for (i = 1; i <= n_ants; i++) {
#ifdef DEBUG
      printf("Ant = %d\n", i);
#endif
      construct_solution(&ant[i]);
#ifdef PARALLEL
      /* parallel build defers iteration-best tracking and local updates */
      compute_placement_quality_parallel(&ant[i]);
      compare_best_so_far_ant(&ant[i]);
#else
      compute_placement_quality(&ant[i]);
      compare_best_so_far_ant(&ant[i]);
      compare_iteration_best_ant(&ant[i]);
      local_pheromone_trail_update(&ant[i]);
#endif
    }
    if (foundBetter == TRUE) {
      if (iteration % printStep == 0) {
        print_results(); /* Print results periodically */
        foundBetter = FALSE;
      }
    }
    if (iteration % restart == 0) {
      reinit_pheromone_matrix(); /* Reinitialize pheromone matrix to avoid stagnation */
    }
    if (iteration % exportPlacementStep == 0) {
      export_placement(best_so_far_ant.nodePosition, best_so_far_ant.grid); /* Export placement file */
    }
    global_pheromone_trail_update();
#ifndef PARALLEL
    reinit_iteration_best_ant();
#endif
    iteration++;
  }

  export_placement(best_so_far_ant.nodePosition, best_so_far_ant.grid);
  printf("Done!\n");

  /* Free all allocated memory */
  free_allocated_memory();

#ifdef DEBUG
  printf("------------ </main> ------------\n");
#endif
  return 0;
} |
matmul.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void matmul(level_type * level, double *C, int * id_A, int * id_B, int rows, int cols, int A_equals_B_transpose){
  // Computes C[mm][nn] = dot(vector id_A[mm], vector id_B[nn]) over all local
  // boxes; only the upper triangle (nn>=mm) is computed and mirrored into the
  // lower triangle, then (under MPI) C is sum-reduced across ranks.
  // *id_A = m vector_id's (conceptually pointers to the rows of a m x level->num_my_boxes*volume matrix)
  // *id_B = n vector_id's (conceptually pointers to the columns of a level->num_my_boxes*volume matrix x n)
  // *C is a mxn matrix where C[rows][cols] = dot(id_A[rows],id_B[cols])
  // FIX, id_A and id_B are likely the same and thus C[][] will be symmetric (modulo missing row?)
  // if(A_equals_B_transpose && (cols>=rows)) then use id_B and only run for nn>=mm // common case for s-step Krylov methods
  // C_is_symmetric && cols< rows (use id_A)
  // NOTE(review): A_equals_B_transpose is documented above but never read in
  // this body -- confirm whether the special case was meant to be implemented.
  int mm,nn;
  uint64_t _timeStart = CycleTime();

  // FIX... rather than performing an all_reduce on the essentially symmetric [G,g], do the all_reduce on the upper triangle and then duplicate (saves BW)
  // #pragma omp parallel for schedule(static,1) collapse(2)
  // hclib replaces the commented-out OpenMP collapse(2) loop: one task per
  // (mm,nn) pair over the rows x cols domain; finish() waits for all of them.
  hclib::finish([&rows, &cols, &level, &id_A, &C, &id_B] {
      hclib::loop_domain_2d loop(rows, cols);
      hclib::forasync2D_nb(&loop, [&level, &id_A, &C, &id_B, &cols, &rows] (int mm, int nn) {
        if(nn>=mm){ // upper triangular
          int box;
          double a_dot_b_level = 0.0;
          for(box=0;box<level->num_my_boxes;box++){
            int i,j,k;
            box_type *lbox = (box_type *)&(level->my_boxes[box]);
            const int jStride = lbox->jStride;
            const int kStride = lbox->kStride;
            const int ghosts = lbox->ghosts;
            const int dim = lbox->dim;
            double * __restrict__ grid_a = (double *) (lbox->vectors[id_A[mm]] + ghosts*(1+jStride+kStride)); // i.e. [0] = first non ghost zone point
            double * __restrict__ grid_b = (double *) (lbox->vectors[id_B[nn]] + ghosts*(1+jStride+kStride));
            double a_dot_b_box = 0.0;
            // accumulate the dot product over the interior (non-ghost) region
            for(k=0;k<dim;k++){
            for(j=0;j<dim;j++){
            for(i=0;i<dim;i++){
              int ijk = i + j*jStride + k*kStride;
              a_dot_b_box += grid_a[ijk]*grid_b[ijk];
            }}}
            a_dot_b_level+=a_dot_b_box;
          }
          C[mm*cols + nn] = a_dot_b_level; // C[mm][nn]
          // mirror into the lower triangle when the transposed slot exists
          if((mm<cols)&&(nn<rows)){C[nn*cols + mm] = a_dot_b_level;}// C[nn][mm]
        }
      }, false, FORASYNC_MODE_FLAT);
  });
  level->cycles.blas3 += (uint64_t)(CycleTime()-_timeStart);

  #ifdef USE_MPI
  // Sum the local Gram matrix across all ranks: copy C into a scratch buffer
  // so MPI_Allreduce can write the reduced result back into C.
  // NOTE(review): malloc result is not checked for NULL.
  double *send_buffer = (double*)malloc(rows*cols*sizeof(double));
  for(mm=0;mm<rows;mm++){
  for(nn=0;nn<cols;nn++){
    send_buffer[mm*cols + nn] = C[mm*cols + nn];
  }}
  uint64_t _timeStartAllReduce = CycleTime();
  hclib::MPI_Allreduce(send_buffer,C,rows*cols,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE);
  uint64_t _timeEndAllReduce = CycleTime();
  level->cycles.collectives += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  free(send_buffer);
  #endif
}
|
bugged2.c | /******************************************************************************
* ЗАДАНИЕ: bugged2.c
* ОПИСАНИЕ:
* Еще одна программа на OpenMP с багом.
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char **argv)
{
    int nthreads, i, tid;
    float total;

    /* total is the reduction target: initialize it exactly once, before the
     * parallel region.  In the original every thread wrote 0.0 to the shared
     * variable inside the region -- a data race. */
    total = 0.0;

    /* tid must be private: with a shared tid every thread overwrote it, so
     * the "Thread %d ..." messages reported wrong ids (the original bug). */
#pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d is starting...\n", tid);

#pragma omp barrier

        /* reduction(+:total) gives each thread a private accumulator and
         * combines them into the shared total at the end of the loop. */
#pragma omp for schedule(dynamic, 10) reduction(+:total)
        for (i = 0; i < 1000000; i++)
        {
            total = total + i*1.0;
        }

        /* The implicit barrier at the end of "omp for" guarantees the reduced
         * value is visible to every thread here. */
        printf ("Thread %d is done! Total= %e\n", tid, total);
    }
    return 0;
}
|
pbkdf2-hmac-sha1_fmt_plug.c | /*
* This software is Copyright (c) 2013 magnum and it is hereby released to
* the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_sha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_sha1);
#else
#include <ctype.h>
#include <string.h>
#include <assert.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "base64_convert.h"
#include "stdint.h"
#define PBKDF2_HMAC_SHA1_ALSO_INCLUDE_CTX 1
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PBKDF2-HMAC-SHA1"
#define FORMAT_NAME ""
#define FORMAT_TAG "$pbkdf2-hmac-sha1$"
#define PKCS5S2_TAG "{PKCS5S2}"
#define PK5K2_TAG "$p5k2$"
#define TAG_LEN (sizeof(FORMAT_TAG) - 1)
#ifdef MMX_COEF
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_N_STR MMX_TYPE
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BINARY_SIZE 20
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define MAX_BINARY_SIZE (4 * BINARY_SIZE)
#define MAX_SALT_SIZE 64
#define MAX_CIPHERTEXT_LENGTH (TAG_LEN + 6 + 1 + 2*MAX_SALT_SIZE + 1 + 2*MAX_BINARY_SIZE)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT MMX_COEF
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define PAD_SIZE 64
#define PLAINTEXT_LENGTH 125
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
/* Self-test vectors: each entry is { ciphertext, plaintext }.  The PKCS5S2
 * and $p5k2$ entries are alternate on-disk encodings that prepare() rewrites
 * into the canonical $pbkdf2-hmac-sha1$ form. */
static struct fmt_tests tests[] = {
	{"$pbkdf2-hmac-sha1$1000.fd11cde0.27de197171e6d49fc5f55c9ef06c0d8751cd7250", "3956"},
	{"$pbkdf2-hmac-sha1$1000.6926d45e.231c561018a4cee662df7cd4a8206701c5806af9", "1234"},
	{"$pbkdf2-hmac-sha1$1000.98fcb0db.37082711ff503c2d2dea9a5cf7853437c274d32e", "5490"},
	// WPA-PSK DK (raw key as stored by some routers)
	// ESSID was "Harkonen" - converted to hex 4861726b6f6e656e
	{"$pbkdf2-hmac-sha1$4096$4861726b6f6e656e$ee51883793a6f68e9615fe73c80a3aa6f2dd0ea537bce627b929183cc6e57925", "12345678"},
	// these get converted in prepare()
	// http://pythonhosted.org/passlib/lib/passlib.hash.atlassian_pbkdf2_sha1.html
	{"{PKCS5S2}DQIXJU038u4P7FdsuFTY/+35bm41kfjZa57UrdxHp2Mu3qF2uy+ooD+jF5t1tb8J", "password"},
	// http://pythonhosted.org/passlib/lib/passlib.hash.cta_pbkdf2_sha1.html
	{"$p5k2$2710$oX9ZZOcNgYoAsYL-8bqxKg==$AU2JLf2rNxWoZxWxRCluY0u6h6c=", "password" },
	{NULL}
};
/* Parsed salt and cost parameters shared by a whole candidate batch. */
static struct custom_salt {
	unsigned int length;   // presumably salt length in bytes (<= MAX_SALT_SIZE) -- confirm in the parser
	unsigned int rounds;   // presumably the PBKDF2 iteration count -- confirm in the parser
	unsigned int use_utf8; // presumably a UTF-8 conversion flag -- confirm at point of use
	//unsigned int outlen; /* Not used yet */
	unsigned char salt[MAX_SALT_SIZE];
} *cur_salt;
/* Per-candidate plaintext and hash buffers, allocated in init() for
 * max_keys_per_crypt candidates. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* One-time setup: scale the candidate-key range by the OpenMP thread
   count (min by thread count, max additionally by OMP_SCALE), then
   allocate the plaintext and digest buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_calloc_tiny(self->params.max_keys_per_crypt *
	                            sizeof(*saved_key), MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(self->params.max_keys_per_crypt *
	                            sizeof(*crypt_out), MEM_ALIGN_WORD);
}
/*
 * Rewrite foreign encodings of PBKDF2-HMAC-SHA1 hashes into our native
 * "$pbkdf2-hmac-Sha1$rounds.salthex.hashhex" form:
 *   {PKCS5S2}base64(salt||dk)      Atlassian, fixed 10000 rounds
 *   $p5k2$HEXITER$b64salt$b64dk    cta, base64 with '-'/'_' alphabet
 * Anything else is passed through untouched.
 *
 * Fixes vs. previous revision: tag lengths are derived from the tag
 * macros instead of being duplicated as the magic numbers 9 and 6;
 * sprintf() into the fixed Buf is replaced by bounded snprintf(); the
 * unsigned iteration count is printed with %u (passing unsigned to %d
 * is undefined behavior per the C standard).
 */
static char *prepare(char *fields[10], struct fmt_main *self)
{
	static char Buf[256];
	const size_t pkcs5s2_len = sizeof(PKCS5S2_TAG) - 1; /* was magic 9 */
	const size_t p5k2_len = sizeof(PK5K2_TAG) - 1;      /* was magic 6 */

	if (strncmp(fields[1], PKCS5S2_TAG, pkcs5s2_len) != 0 &&
	    strncmp(fields[1], PK5K2_TAG, p5k2_len) != 0)
		return fields[1];
	if (!strncmp(fields[1], PKCS5S2_TAG, pkcs5s2_len)) {
		char tmp[120];
		if (strlen(fields[1]) > 75) return fields[1];
		//{"{PKCS5S2}DQIXJU038u4P7FdsuFTY/+35bm41kfjZa57UrdxHp2Mu3qF2uy+ooD+jF5t1tb8J", "password"},
		//{"$pbkdf2-hmac-sha1$10000.0d0217254d37f2ee0fec576cb854d8ff.edf96e6e3591f8d96b9ed4addc47a7632edea176bb2fa8a03fa3179b75b5bf09", "password"},
		base64_convert(&(fields[1][pkcs5s2_len]), e_b64_mime,
		               strlen(&(fields[1][pkcs5s2_len])), tmp,
		               e_b64_hex, sizeof(tmp), 0);
		/* first 16 salt bytes (32 hex digits), then the derived key */
		snprintf(Buf, sizeof(Buf), "$pbkdf2-hmac-Sha1$10000.%32.32s.%s",
		         tmp, &tmp[32]);
		return Buf;
	}
	if (!strncmp(fields[1], PK5K2_TAG, p5k2_len)) {
		char tmps[140], tmph[70], *cp, *cp2;
		unsigned iter = 0;
		// salt was listed as 1024 bytes max. But our max salt size is 64 bytes (~90 base64 bytes).
		if (strlen(fields[1]) > 128) return fields[1];
		//{"$p5k2$2710$oX9ZZOcNgYoAsYL-8bqxKg==$AU2JLf2rNxWoZxWxRCluY0u6h6c=", "password" },
		//{"$pbkdf2-hmac-sha1$10000.a17f5964e70d818a00b182fef1bab12a.014d892dfdab3715a86715b144296e634bba87a7", "password"},
		cp = fields[1] + p5k2_len;
		/* hex iteration count, terminated by '$' */
		while (*cp && *cp != '$') {
			iter *= 0x10;
			iter += atoi16[ARCH_INDEX(*cp)];
			++cp;
		}
		if (*cp != '$') return fields[1];
		++cp;
		cp2 = strchr(cp, '$');
		if (!cp2) return fields[1];
		base64_convert(cp, e_b64_mime, cp2 - cp, tmps, e_b64_hex,
		               sizeof(tmps), flg_Base64_MIME_DASH_UNDER);
		++cp2;
		base64_convert(cp2, e_b64_mime, strlen(cp2), tmph, e_b64_hex,
		               sizeof(tmph), flg_Base64_MIME_DASH_UNDER);
		snprintf(Buf, sizeof(Buf), "$pbkdf2-hmac-Sha1$%u.%s.%s",
		         iter, tmps, tmph);
		return Buf;
	}
	return fields[1];
}
/* Return nonzero iff the NUL-terminated string consists solely of hex
   digits (atoi16 maps non-hex characters to 0x7F).  An empty string
   also passes, matching the callers' prior length checks. */
static int ishex(char *q)
{
	const char *p;

	for (p = q; atoi16[ARCH_INDEX(*p)] != 0x7F; ++p)
		;
	return *p == 0;
}
/*
 * Syntax check for the canonical form:
 *   $pbkdf2-hmac-sha1$<iter><sep><salthex><sep><binhex>
 * where <sep> is '.' or '$' (WPA-PSK style dumps use '$').
 * Returns 1 for parseable input, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ptr, *ctcopy, *keeptr;
	size_t len;
	char *delim;

	if (strncasecmp(ciphertext, FORMAT_TAG, TAG_LEN))
		return 0;
	if (strlen(ciphertext) > MAX_CIPHERTEXT_LENGTH)
		return 0;
	ciphertext += TAG_LEN;
	/* detect which separator this hash uses */
	delim = strchr(ciphertext, '.') ? "." : "$";
	if (!(ctcopy = strdup(ciphertext))) /* strtok() modifies its input */
		return 0;
	keeptr = ctcopy;
	if (!(ptr = strtok(ctcopy, delim)))
		goto error;
	if (!atoi(ptr)) /* iteration count must parse to a nonzero number */
		goto error;
	if (!(ptr = strtok(NULL, delim)))
		goto error;
	len = strlen(ptr); // salt hex length
	if (len > 2 * MAX_SALT_SIZE || len & 1) /* even, and <= 64 bytes */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, delim)))
		goto error;
	len = strlen(ptr); // binary length
	/* NOTE(review): len counts hex digits but is compared against byte
	   sizes, so the shortest accepted digest is BINARY_SIZE/2 bytes --
	   confirm this lower bound is intended. */
	if (len < BINARY_SIZE || len > MAX_BINARY_SIZE || len & 1)
		goto error;
	if (!ishex(ptr))
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}
/* Canonicalize a hash for storage: lower-case the whole string (the
   format sets FMT_SPLIT_UNIFIES_CASE, so differently-cased duplicates
   unify).  Returns a pointer to a static buffer. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[MAX_CIPHERTEXT_LENGTH + 1];
	strnzcpy(out, ciphertext, sizeof(out));
	strlwr(out);
	return out;
}
/*
 * Parse iteration count and hex salt out of a canonical hash into a
 * static custom_salt.  Accepts '.' or '$' as the field separator.
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int saltlen;
	char delim;

	if (!strncasecmp(ciphertext, FORMAT_TAG, sizeof(FORMAT_TAG) - 1))
		ciphertext += sizeof(FORMAT_TAG) - 1;
	/* NOTE(review): offset 13 is read *after* the tag has been stripped
	   (so it lands inside the "rounds.salthex" area), and split() has
	   already lower-cased the string -- verify the capital-'S' marker
	   written by prepare() can actually be observed here. */
	cs.use_utf8 = ciphertext[13] == 'S';
	cs.rounds = atoi(ciphertext);
	delim = strchr(ciphertext, '.') ? '.' : '$';
	ciphertext = strchr(ciphertext, delim) + 1; /* start of salt hex */
	p = strchr(ciphertext, delim);              /* end of salt hex */
	saltlen = 0;
	memset(cs.salt, 0, sizeof(cs.salt));
	while (ciphertext < p) { /* decode two hex digits per salt byte */
		cs.salt[saltlen++] =
			atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[1])];
		ciphertext += 2;
	}
	cs.length = saltlen;
	return (void *)&cs;
}
/* Decode the hex digest (everything after the last separator) into a
   static, word-aligned byte buffer.  On big-endian builds each 32-bit
   word is byte-swapped to match the layout pbkdf2_sha1() produces. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[MAX_BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char delim = strchr(ciphertext, '.') ? '.' : '$';
	char *p = strrchr(ciphertext, delim) + 1;
	int nbytes = strlen(p) / 2;
	int i = 0;

	while (i < nbytes && *p) {
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
		          atoi16[ARCH_INDEX(p[1])];
		p += 2;
		++i;
	}
#if !ARCH_LITTLE_ENDIAN
	for (i = 0; i < nbytes / sizeof(ARCH_WORD_32); ++i) {
		((ARCH_WORD_32*)out)[i] = JOHNSWAP(((ARCH_WORD_32*)out)[i]);
	}
#endif
	return out;
}
/* Select the salt crypt_all() will use (called once per salt batch). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Hash-table bucket helpers: successively wider low-bit slices of the
   first 32-bit word of each computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/*
 * Compute PBKDF2-HMAC-SHA1 for all queued candidates under the current
 * salt; results land in crypt_out[] for the cmp_* functions.  With SIMD
 * builds (SSE_GROUP_SZ_SHA1 defined) keys are processed in groups of
 * that size, otherwise one at a time.  Iterations are independent, so
 * the loop parallelizes cleanly; all per-group scratch is declared
 * inside the loop body and is therefore private to each thread.
 *
 * Fix vs. previous revision: an empty
 *   #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 ... #endif
 * pair sat between the OpenMP pragma and its loop; it contributed
 * nothing and has been removed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#ifdef SSE_GROUP_SZ_SHA1
		int lens[SSE_GROUP_SZ_SHA1], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens,
		                cur_salt->salt, cur_salt->length,
		                cur_salt->rounds, &(x.poutc),
		                BINARY_SIZE, 0);
#else
		pbkdf2_sha1((const unsigned char*)(saved_key[index]),
		            strlen(saved_key[index]),
		            cur_salt->salt, cur_salt->length,
		            cur_salt->rounds, (unsigned char*)crypt_out[index],
		            BINARY_SIZE, 0);
#endif
	}
	return count;
}
/* Quick screen: does any computed digest match the reference binary in
   its first ARCH_SIZE bytes?  Full-width checks follow in cmp_one /
   cmp_exact.  Without OpenMP and with MAX_KEYS_PER_CRYPT == 1 the loop
   header compiles away, leaving a single test of index 0. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
	if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
		return 1;
	return 0;
}
/* Compare one candidate's digest against the stored binary over the
   full BINARY_SIZE (20) bytes. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Check the FULL binary, just for good measure. There is no chance we'll
   have a false positive here but this function is not performance sensitive.
   Only reached for hashes whose stored digest is longer than BINARY_SIZE
   (e.g. the 32-byte WPA-PSK vectors); it re-derives the key at the full
   stored length and compares everything. */
static int cmp_exact(char *source, int index)
{
	int i = 0, len, result;
	char *p;
	char delim;
	unsigned char *binary, *crypt;

	delim = strchr(source, '.') ? '.' : '$';
	p = strrchr(source, delim) + 1;
	len = strlen(p) / 2;
	/* digests of exactly BINARY_SIZE bytes were fully checked by cmp_one */
	if (len == BINARY_SIZE) return 1;
	binary = mem_alloc(len);
	crypt = mem_alloc(len);
	while (*p) { /* decode the full stored digest from hex */
		binary[i++] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
#if !ARCH_LITTLE_ENDIAN
	/* match the word layout pbkdf2_sha1() produces on big-endian */
	for (i = 0; i < len/sizeof(ARCH_WORD_32); ++i) {
		((ARCH_WORD_32*)binary)[i] = JOHNSWAP(((ARCH_WORD_32*)binary)[i]);
	}
#endif
	pbkdf2_sha1((const unsigned char*)(saved_key[index]),
	            strlen(saved_key[index]),
	            cur_salt->salt, cur_salt->length,
	            cur_salt->rounds, crypt, len, 0);
	result = !memcmp(binary, crypt, len);
	//dump_stuff_msg("hash binary", binary, len);
	//dump_stuff_msg("calc binary", crypt, len);
	MEM_FREE(binary);
	MEM_FREE(crypt);
	if (!result)
		fprintf(stderr, "\n%s: Warning: Partial match for '%s'.\n"
		        "This is a bug or a malformed input line of:\n%s\n",
		        FORMAT_LABEL, saved_key[index], source);
	return result;
}
/* Store one candidate password, truncated to PLAINTEXT_LENGTH bytes and
   always NUL-terminated. */
static void set_key(char *key, int index)
{
	size_t n = strlen(key);

	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, n);
	saved_key[index][n] = 0;
}
/* Return the stored candidate, used when reporting cracked passwords. */
static char *get_key(int index)
{
	return saved_key[index];
}
#if FMT_MAIN_VERSION > 11
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->rounds;
}
#endif
/* Format descriptor registered with John's core: the parameter record
   followed by the method table wiring up the functions above. */
struct fmt_main fmt_pbkdf2_hmac_sha1 = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
		{ /* tunable cost names, parallel to the hooks below */
			"iteration count",
		},
#endif
		tests
	}, { /* methods */
		init,
		fmt_default_done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
			iteration_count,
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
column_matrix.h | /*!
* Copyright 2017 by Contributors
* \file column_matrix.h
* \brief Utility for fast column-wise access
* \author Philip Cho
*/
#ifdef __ENCLAVE_OBLIVIOUS__
#include "column_matrix_obl.h"
#else
#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_
#include <limits>
#include <vector>
#include <memory>
#include "hist_util.h"
namespace xgboost {
namespace common {
class ColumnMatrix;
/*! \brief column type */
enum ColumnType {
kDenseColumn,
kSparseColumn
};
/*! \brief a column storage, to be used with ApplySplit. Note that each
bin id is stored as index[i] + index_base.
Different types of column index for each column allow
to reduce the memory usage. */
// Read-only view over one feature's bin indices.  BinIdxType is the
// narrowest unsigned integer able to hold the feature's local bin ids;
// global bin id = local id + index_base_.
template <typename BinIdxType>
class Column {
 public:
  Column(ColumnType type, common::Span<const BinIdxType> index, const uint32_t index_base)
      : type_(type),
        index_(index),
        index_base_(index_base) {}
  virtual ~Column() = default;
  // Local bin id widened and offset into the global bin numbering.
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + static_cast<uint32_t>(index_[idx]);
  }
  BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }
  const uint32_t GetBaseIdx() const { return index_base_; }
  common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }
  ColumnType GetType() const { return type_; }
  /* returns number of elements in column */
  size_t Size() const { return index_.size(); }

 private:
  /* type of column */
  ColumnType type_;
  /* bin indexes in range [0, max_bins - 1] */
  common::Span<const BinIdxType> index_;
  /* bin index offset for specific feature */
  const uint32_t index_base_;
};
// Sparse column: stores only the nonzero entries' bin ids plus, in
// row_ind_, the row index of each stored entry.
template <typename BinIdxType>
class SparseColumn: public Column<BinIdxType> {
 public:
  SparseColumn(ColumnType type, common::Span<const BinIdxType> index,
               uint32_t index_base, common::Span<const size_t> row_ind)
      : Column<BinIdxType>(type, index, index_base),
        row_ind_(row_ind) {}
  const size_t* GetRowData() const { return row_ind_.data(); }
  // Row number of the idx-th stored (nonzero) entry.
  size_t GetRowIdx(size_t idx) const {
    return row_ind_.data()[idx];
  }

 private:
  /* indexes of rows */
  common::Span<const size_t> row_ind_;
};
// Dense column: one slot per row; missing entries are flagged in the
// shared missing_flags_ bitmap, offset by this feature's feature_offset.
template <typename BinIdxType>
class DenseColumn: public Column<BinIdxType> {
 public:
  DenseColumn(ColumnType type, common::Span<const BinIdxType> index,
              uint32_t index_base, const std::vector<bool>& missing_flags,
              size_t feature_offset)
      : Column<BinIdxType>(type, index, index_base),
        missing_flags_(missing_flags),
        feature_offset_(feature_offset) {}
  bool IsMissing(size_t idx) const { return missing_flags_[feature_offset_ + idx]; }

 private:
  /* flags for missing values in dense columns */
  const std::vector<bool>& missing_flags_;
  size_t feature_offset_;
};
/*! \brief a collection of columns, with support for construction from
GHistIndexMatrix. */
class ColumnMatrix {
 public:
  // get number of features
  inline bst_uint GetNumFeature() const {
    return static_cast<bst_uint>(type_.size());
  }

#ifdef __ENCLAVE_OBLIVIOUS__
  // construct column matrix from GHistIndexMatrix
  // Oblivious variant: additionally builds row_wise_index_ so that
  // per-(row, feature) lookups can be served with a bounded-range
  // ObliviousArrayAccess (see OGetRowFeatureBinIndex).
  inline void Init(const GHistIndexMatrix& gmat,
                   double sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.Ptrs().size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;
    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    // each feature's bin count must fit in 32 bits
    for (bst_uint fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.Ptrs()[fid + 1] - gmat.cut.Ptrs()[fid], max_val);
    }
    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features: below the occupancy threshold -> sparse storage
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
          < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
      } else {
        type_[fid] = kDenseColumn;
      }
    }
    // want to compute storage boundary for each feature
    // using variants of prefix sum scan
    // dense features reserve one slot per row; sparse ones reserve only
    // their nonzero count
    boundary_.resize(nfeature);
    size_t accum_index_ = 0;
    size_t accum_row_ind_ = 0;
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      boundary_[fid].index_begin = accum_index_;
      boundary_[fid].row_ind_begin = accum_row_ind_;
      if (type_[fid] == kDenseColumn) {
        accum_index_ += static_cast<size_t>(nrow);
        accum_row_ind_ += static_cast<size_t>(nrow);
      } else {
        accum_index_ += feature_counts_[fid];
        accum_row_ind_ += feature_counts_[fid];
      }
      boundary_[fid].index_end = accum_index_;
      boundary_[fid].row_ind_end = accum_row_ind_;
    }
    index_.resize(boundary_[nfeature - 1].index_end);
    row_ind_.resize(boundary_[nfeature - 1].row_ind_end);
    // store least bin id for each feature
    index_base_ = const_cast<uint32_t*>(gmat.cut.Ptrs().data());
    // pre-fill index_ for dense columns
    // NOTE(review): index_ is a std::vector<uint8_t>, so assigning
    // uint32_t max() stores 0xFF per element -- confirm this sentinel
    // and the element width are what this branch intends.
#pragma omp parallel for
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (type_[fid] == kDenseColumn) {
        const size_t ibegin = boundary_[fid].index_begin;
        //uint32_t* begin = &index_[ibegin];
        //uint32_t* end = begin + nrow;
        //std::fill(begin, end, std::numeric_limits<uint32_t>::max());
        for (uint32_t i = 0; i < nrow; i++) {
          index_[ibegin + i] = std::numeric_limits<uint32_t>::max();
        }
        // max() indicates missing values
      }
    }
    // loop over all rows and fill column entries
    // num_nonzeros[fid] = how many nonzeros have this feature accumulated so far?
    std::vector<size_t> num_nonzeros;
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
    // For oblivious.
    row_wise_index_.resize(nrow * nfeature);
    std::fill(row_wise_index_.begin(), row_wise_index_.end(),
              std::numeric_limits<uint32_t>::max());
    nfeature_ = nfeature;
    for (size_t rid = 0; rid < nrow; ++rid) {
      const size_t ibegin = gmat.row_ptr[rid];
      const size_t iend = gmat.row_ptr[rid + 1];
      size_t fid = 0;
      for (size_t i = ibegin; i < iend; ++i) {
        // NOTE: For dense data structure, below codes are already oblivious,
        // given the |gmat.index| in one instance are already sorted.
        const uint32_t bin_id = gmat.index[i];
        // advance fid until bin_id falls inside feature fid's bin range
        while (bin_id >= gmat.cut.Ptrs()[fid + 1]) {
          ++fid;
        }
        // For oblivious. Note this contains index_base.
        row_wise_index_[rid * nfeature + fid] = bin_id;
        if (type_[fid] == kDenseColumn) {
          //uint32_t* begin = &index_[boundary_[fid].index_begin];
          //begin[rid] = bin_id - index_base_[fid];
          index_[boundary_[fid].index_begin + rid] = bin_id - index_base_[fid];
        } else {
          //uint32_t* begin = &index_[boundary_[fid].index_begin];
          //begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
          index_[boundary_[fid].index_begin + num_nonzeros[fid]] = bin_id - index_base_[fid];
          row_ind_[boundary_[fid].row_ind_begin + num_nonzeros[fid]] = rid;
          ++num_nonzeros[fid];
        }
      }
    }
  }
#else
  // construct column matrix from GHistIndexMatrix
  inline void Init(const GHistIndexMatrix& gmat,
                   double sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.Ptrs().size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;
    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.Ptrs()[fid + 1] - gmat.cut.Ptrs()[fid], max_val);
    }
    bool all_dense = gmat.IsDense();
    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
          < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
        all_dense = false;
      } else {
        type_[fid] = kDenseColumn;
      }
    }
    // want to compute storage boundary for each feature
    // using variants of prefix sum scan
    feature_offsets_.resize(nfeature + 1);
    size_t accum_index_ = 0;
    feature_offsets_[0] = accum_index_;
    for (int32_t fid = 1; fid < nfeature + 1; ++fid) {
      if (type_[fid - 1] == kDenseColumn) {
        accum_index_ += static_cast<size_t>(nrow);
      } else {
        accum_index_ += feature_counts_[fid - 1];
      }
      feature_offsets_[fid] = accum_index_;
    }
    // pick the narrowest per-entry width that fits max_num_bins
    SetTypeSize(gmat.max_num_bins);
    index_.resize(feature_offsets_[nfeature] * bins_type_size_, 0);
    if (!all_dense) {
      row_ind_.resize(feature_offsets_[nfeature]);
    }
    // store least bin id for each feature
    index_base_ = const_cast<uint32_t*>(gmat.cut.Ptrs().data());
    const bool noMissingValues = NoMissingValues(gmat.row_ptr[nrow], nrow, nfeature);
    any_missing_ = !noMissingValues;
    // with no missing values every slot is valid from the start;
    // otherwise slots are cleared as they are filled in SetIndex*
    if (noMissingValues) {
      missing_flags_.resize(feature_offsets_[nfeature], false);
    } else {
      missing_flags_.resize(feature_offsets_[nfeature], true);
    }
    // pre-fill index_ for dense columns
    if (all_dense) {
      BinTypeSize gmat_bin_size = gmat.index.GetBinTypeSize();
      if (gmat_bin_size == kUint8BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint8_t>(), gmat, nrow, nfeature, noMissingValues);
      } else if (gmat_bin_size == kUint16BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint16_t>(), gmat, nrow, nfeature, noMissingValues);
      } else {
        CHECK_EQ(gmat_bin_size, kUint32BinsTypeSize);
        SetIndexAllDense(gmat.index.data<uint32_t>(), gmat, nrow, nfeature, noMissingValues);
      }
      /* For sparse DMatrix gmat.index.getBinTypeSize() returns always kUint32BinsTypeSize
         but for ColumnMatrix we still have a chance to reduce the memory consumption */
    } else {
      if (bins_type_size_ == kUint8BinsTypeSize) {
        SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      } else if (bins_type_size_ == kUint16BinsTypeSize) {
        SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      } else {
        CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
        SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      }
    }
  }
#endif

  /* Set the number of bytes based on numeric limit of maximum number of bins provided by user */
  void SetTypeSize(size_t max_num_bins) {
    if ( (max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint8_t>::max()) ) {
      bins_type_size_ = kUint8BinsTypeSize;
    } else if ((max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint16_t>::max())) {
      bins_type_size_ = kUint16BinsTypeSize;
    } else {
      bins_type_size_ = kUint32BinsTypeSize;
    }
  }

  /* Fetch an individual column. This code should be used with type swith
     to determine type of bin id's */
  template <typename BinIdxType>
  std::unique_ptr<const Column<BinIdxType> > GetColumn(unsigned fid) const {
    CHECK_EQ(sizeof(BinIdxType), bins_type_size_);
    const size_t feature_offset = feature_offsets_[fid];  // to get right place for certain feature
    const size_t column_size = feature_offsets_[fid + 1] - feature_offset;
    common::Span<const BinIdxType> bin_index = { reinterpret_cast<const BinIdxType*>(
                                                 &index_[feature_offset * bins_type_size_]),
                                                 column_size };
    std::unique_ptr<const Column<BinIdxType> > res;
    if (type_[fid] == ColumnType::kDenseColumn) {
      res.reset(new DenseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
                                            missing_flags_, feature_offset));
    } else {
      res.reset(new SparseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
                                             {&row_ind_[feature_offset], column_size}));
    }
    return res;
  }

  // Transpose an all-dense GHistIndexMatrix into column order.  The
  // fast path (no missing values) parallelizes over rows; otherwise the
  // SparsePage batches are walked to learn which slots are present.
  template<typename T>
  inline void SetIndexAllDense(T* index, const GHistIndexMatrix& gmat, const size_t nrow,
                               const size_t nfeature, const bool noMissingValues) {
    T* local_index = reinterpret_cast<T*>(&index_[0]);
    /* missing values make sense only for column with type kDenseColumn,
       and if no missing values were observed it could be handled much faster. */
    if (noMissingValues) {
#pragma omp parallel for num_threads(omp_get_max_threads())
      for (omp_ulong rid = 0; rid < nrow; ++rid) {
        const size_t ibegin = rid*nfeature;
        const size_t iend = (rid+1)*nfeature;
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const size_t idx = feature_offsets_[j];
          local_index[idx + rid] = index[i];
        }
      }
    } else {
      /* to handle rows in all batches, sum of all batch sizes equal to gmat.row_ptr.size() - 1 */
      size_t rbegin = 0;
      for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
        const xgboost::Entry* data_ptr = batch.data.HostVector().data();
        const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
        const size_t batch_size = batch.Size();
        CHECK_LT(batch_size, offset_vec.size());
        for (size_t rid = 0; rid < batch_size; ++rid) {
          const size_t size = offset_vec[rid + 1] - offset_vec[rid];
          SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
          const size_t ibegin = gmat.row_ptr[rbegin + rid];
          const size_t iend = gmat.row_ptr[rbegin + rid + 1];
          CHECK_EQ(ibegin + inst.size(), iend);
          size_t j = 0;
          size_t fid = 0;
          for (size_t i = ibegin; i < iend; ++i, ++j) {
            fid = inst[j].index;
            const size_t idx = feature_offsets_[fid];
            /* rbegin allows to store indexes from specific SparsePage batch */
            local_index[idx + rbegin + rid] = index[i];
            missing_flags_[idx + rbegin + rid] = false;
          }
        }
        rbegin += batch.Size();
      }
    }
  }

  // Mixed dense/sparse transpose: dense features write into their row
  // slot, sparse features append (bin id, row) pairs in order.  T is
  // the narrowed per-entry storage type chosen by SetTypeSize().
  template<typename T>
  inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
                       const size_t nrow, const size_t nfeature) {
    std::vector<size_t> num_nonzeros;
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
    T* local_index = reinterpret_cast<T*>(&index_[0]);
    size_t rbegin = 0;
    for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
      const xgboost::Entry* data_ptr = batch.data.HostVector().data();
      const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
      const size_t batch_size = batch.Size();
      CHECK_LT(batch_size, offset_vec.size());
      for (size_t rid = 0; rid < batch_size; ++rid) {
        const size_t ibegin = gmat.row_ptr[rbegin + rid];
        const size_t iend = gmat.row_ptr[rbegin + rid + 1];
        size_t fid = 0;
        const size_t size = offset_vec[rid + 1] - offset_vec[rid];
        SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
        CHECK_EQ(ibegin + inst.size(), iend);
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const uint32_t bin_id = index[i];
          fid = inst[j].index;
          if (type_[fid] == kDenseColumn) {
            T* begin = &local_index[feature_offsets_[fid]];
            begin[rid + rbegin] = bin_id - index_base_[fid];
            missing_flags_[feature_offsets_[fid] + rid + rbegin] = false;
          } else {
            T* begin = &local_index[feature_offsets_[fid]];
            begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
            row_ind_[feature_offsets_[fid] + num_nonzeros[fid]] = rid + rbegin;
            ++num_nonzeros[fid];
          }
        }
      }
      rbegin += batch.Size();
    }
  }

  const BinTypeSize GetTypeSize() const {
    return bins_type_size_;
  }

  // This is just an utility function
  // A fully-dense matrix has exactly nfeature entries per row.
  const bool NoMissingValues(const size_t n_elements,
                             const size_t n_row, const size_t n_features) {
    return n_elements == n_features * n_row;
  }

  // And this returns part of state
  const bool AnyMissing() const {
    return any_missing_;
  }

#ifdef __ENCLAVE_OBLIVIOUS__
  inline uint32_t OGetRowFeatureBinIndex(size_t row_idx, int fid) const {
    // NOTE: `oaccess` between [row_idx * nfeature, (row_idx + 1) * nfeature]
    // return row_wise_index_[row_idx * nfeature_ + fid];
    return ObliviousArrayAccess(row_wise_index_.data() + row_idx * nfeature_,
                                fid, nfeature_);
  }
#endif

 private:
  std::vector<uint8_t> index_;
  std::vector<size_t> feature_counts_;
  std::vector<ColumnType> type_;
  std::vector<size_t> row_ind_;
  /* indicate where each column's index and row_ind is stored. */
  std::vector<size_t> feature_offsets_;
  // index_base_[fid]: least bin id for feature fid
  uint32_t* index_base_;
  std::vector<bool> missing_flags_;
  BinTypeSize bins_type_size_;
  bool any_missing_;
#ifdef __ENCLAVE_OBLIVIOUS__
  struct ColumnBoundary {
    // indicate where each column's index and row_ind is stored.
    // index_begin and index_end are logical offsets, so they should be converted to
    // actual offsets by scaling with packing_factor_
    size_t index_begin;
    size_t index_end;
    size_t row_ind_begin;
    size_t row_ind_end;
  };
  std::vector<ColumnBoundary> boundary_;
  // Row wise feature index, this helps reduce `oaccess` range to number of features.
  std::vector<uint32_t> row_wise_index_;
  int32_t nfeature_;
#endif
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_COLUMN_MATRIX_H_
#endif // __ENCLAVE_OBLIVIOUS__
|
par_csr_matop.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "_hypre_parcsr_mv.h"
/* RDF: The following prototype already exists in _hypre_parcsr_ls.h, so
* something needs to be reorganized here.*/
#ifdef __cplusplus
extern "C" {
#endif
hypre_CSRMatrix *
hypre_ExchangeRAPData( hypre_CSRMatrix *RAP_int, hypre_ParCSRCommPkg *comm_pkg_RT);
/* reference seems necessary to prevent a problem with the
"headers" script... */
#ifdef __cplusplus
}
#endif
/* The following function was formerly part of hypre_ParMatmul
but was removed so it can also be used for multiplication of
Boolean matrices
*/
/*
 * First (symbolic) pass of the parallel sparse matrix product C = A*B:
 * computes only the row sizes of C, split into its diagonal and
 * off-diagonal parts.  Rows of A are partitioned by hand across the
 * OpenMP team; each thread counts its rows' entries using a private
 * B_marker scratch array, then after a barrier converts the per-thread
 * counts into global row offsets (an exclusive prefix scan).
 * Allocates and fills *C_diag_i / *C_offd_i (caller frees) and returns
 * the total nonzero counts in *C_diag_size / *C_offd_size.
 */
void hypre_ParMatmul_RowSizes(
   HYPRE_Int ** C_diag_i,
   HYPRE_Int ** C_offd_i,
   /*HYPRE_Int ** B_marker,*/
   HYPRE_Int * A_diag_i,
   HYPRE_Int * A_diag_j,
   HYPRE_Int * A_offd_i,
   HYPRE_Int * A_offd_j,
   HYPRE_Int * B_diag_i,
   HYPRE_Int * B_diag_j,
   HYPRE_Int * B_offd_i,
   HYPRE_Int * B_offd_j,
   HYPRE_Int * B_ext_diag_i,
   HYPRE_Int * B_ext_diag_j,
   HYPRE_Int * B_ext_offd_i,
   HYPRE_Int * B_ext_offd_j,
   HYPRE_Int * map_B_to_C,
   HYPRE_Int *C_diag_size,
   HYPRE_Int *C_offd_size,
   HYPRE_Int num_rows_diag_A,
   HYPRE_Int num_cols_offd_A,
   HYPRE_Int allsquare,
   HYPRE_Int num_cols_diag_B,
   HYPRE_Int num_cols_offd_B,
   HYPRE_Int num_cols_offd_C
)
{
   HYPRE_Int i1, i2, i3, jj2, jj3;
   HYPRE_Int jj_count_diag, jj_count_offd, jj_row_begin_diag, jj_row_begin_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */
   HYPRE_Int num_threads = hypre_NumThreads();
   HYPRE_Int *jj_count_diag_array;
   HYPRE_Int *jj_count_offd_array;
   HYPRE_Int ii, size, rest;
   /* First pass begins here. Computes sizes of C rows.
      Arrays computed: C_diag_i, C_offd_i, B_marker
      Arrays needed: (11, all HYPRE_Int*)
        A_diag_i, A_diag_j, A_offd_i, A_offd_j,
        B_diag_i, B_diag_j, B_offd_i, B_offd_j,
        B_ext_i, B_ext_j, col_map_offd_B,
        col_map_offd_B, B_offd_i, B_offd_j, B_ext_i, B_ext_j,
      Scalars computed: C_diag_size, C_offd_size
      Scalars needed:
      num_rows_diag_A, num_rows_diag_A, num_cols_offd_A, allsquare,
      first_col_diag_B, n_cols_B, num_cols_offd_B, num_cols_diag_B
   */
   *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1);
   *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1);
   jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads);
   jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads);
   /*-----------------------------------------------------------------------
    *  Loop over rows of A
    *-----------------------------------------------------------------------*/
   /* rows are split into num_threads chunks; the first `rest` chunks get
      one extra row */
   size = num_rows_diag_A/num_threads;
   rest = num_rows_diag_A - size*num_threads;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ii, i1, jj_row_begin_diag, jj_row_begin_offd, jj_count_diag, jj_count_offd, jj2, i2, jj3, i3)
#endif
   /*for (ii=0; ii < num_threads; ii++)*/
   {
      HYPRE_Int *B_marker = NULL;
      HYPRE_Int ns, ne;   /* this thread's row range [ns, ne) */
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      jj_count_diag = start_indexing;
      jj_count_offd = start_indexing;
      /* B_marker[i3] records the count at which column i3 was last seen;
         comparing against the row's starting count detects duplicates */
      if (num_cols_diag_B || num_cols_offd_C)
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C);
      for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++)
         B_marker[i1] = -1;
      for (i1 = ns; i1 < ne; i1++)
      {
         /*--------------------------------------------------------------------
          *  Set marker for diagonal entry, C_{i1,i1} (for square matrices).
          *--------------------------------------------------------------------*/
         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if ( allsquare ) {
            B_marker[i1] = jj_count_diag;
            jj_count_diag++;
         }
         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_offd.
          *-----------------------------------------------------------------*/
         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];
               /*-----------------------------------------------------------
                *  Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/
               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+B_ext_offd_j[jj3];
                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{i1,i3} has not already
                   *  been accounted for. If it has not, mark it and increment
                   *  counter.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }
               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];
                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     jj_count_diag++;
                  }
               }
            }
         }
         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_diag.
          *-----------------------------------------------------------------*/
         for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];
            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/
            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];
               /*--------------------------------------------------------
                *  Check B_marker to see that C_{i1,i3} has not already
                *  been accounted for. If it has not, mark it and increment
                *  counter.
                *--------------------------------------------------------*/
               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  jj_count_diag++;
               }
            }
            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_offd.
             *-----------------------------------------------------------*/
            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];
                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{i1,i3} has not already
                   *  been accounted for. If it has not, mark it and increment
                   *  counter.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }
            }
         }
         /*--------------------------------------------------------------------
          * Set C_diag_i and C_offd_i for this row.
          *--------------------------------------------------------------------*/
         (*C_diag_i)[i1] = jj_row_begin_diag;
         (*C_offd_i)[i1] = jj_row_begin_offd;
      }
      jj_count_diag_array[ii] = jj_count_diag;
      jj_count_offd_array[ii] = jj_count_offd;
      hypre_TFree(B_marker);
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* After the barrier every thread's counts are published.  Each
         thread ii > 0 shifts its rows by the total count of threads
         0..ii-1 (exclusive prefix scan); thread 0 writes the grand
         totals into the final row-pointer slot. */
      if (ii)
      {
         jj_count_diag = jj_count_diag_array[0];
         jj_count_offd = jj_count_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            jj_count_diag += jj_count_diag_array[i1];
            jj_count_offd += jj_count_offd_array[i1];
         }
         for (i1 = ns; i1 < ne; i1++)
         {
            (*C_diag_i)[i1] += jj_count_diag;
            (*C_offd_i)[i1] += jj_count_offd;
         }
      }
      else
      {
         (*C_diag_i)[num_rows_diag_A] = 0;
         (*C_offd_i)[num_rows_diag_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1];
            (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1];
         }
      }
   } /* end parallel loop */
   /*-----------------------------------------------------------------------
    *  Allocate C_diag_data and C_diag_j arrays.
    *  Allocate C_offd_data and C_offd_j arrays.
    *-----------------------------------------------------------------------*/
   *C_diag_size = (*C_diag_i)[num_rows_diag_A];
   *C_offd_size = (*C_offd_i)[num_rows_diag_A];
   hypre_TFree(jj_count_diag_array);
   hypre_TFree(jj_count_offd_array);
   /* End of First Pass */
}
/*--------------------------------------------------------------------------
* hypre_ParMatmul : multiplies two ParCSRMatrices A and B and returns
* the product in ParCSRMatrix C
* Note that C does not own the partitionings since its row_starts
* is owned by A and col_starts by B.
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *hypre_ParMatmul( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B )
{
/*
 * Computes C = A*B for two distributed (ParCSR) matrices.
 *
 * High-level structure:
 *   1. Extract B_ext: the rows of B owned by other processes that are
 *      referenced by A's off-diagonal part (hypre_ParCSRMatrixExtractBExt).
 *   2. Split B_ext into a "diag" part (columns in this process's diagonal
 *      column range of B) and an "offd" part, renumbering offd columns
 *      into a merged local map col_map_offd_C.  Two compile-time paths:
 *      a concurrent hopscotch hash set (HYPRE_CONCURRENT_HOPSCOTCH) or a
 *      serial sort/unique over a temp array.
 *   3. First pass (hypre_ParMatmul_RowSizes): count nonzeros per row of C.
 *   4. Second pass: fill C_diag/C_offd data and column indices using a
 *      per-thread B_marker scratch array.
 *
 * Returns the product C, or NULL (with hypre_error set) on incompatible
 * dimensions.  C does NOT own its row/col partitionings (they belong to
 * A and B respectively).
 */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
HYPRE_Int first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
HYPRE_Int last_col_diag_B;
HYPRE_Int *col_starts_B = hypre_ParCSRMatrixColStarts(B);
HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
hypre_ParCSRMatrix *C;
HYPRE_Int *col_map_offd_C;
HYPRE_Int *map_B_to_C=NULL;
hypre_CSRMatrix *C_diag;
HYPRE_Complex *C_diag_data;
HYPRE_Int *C_diag_i;
HYPRE_Int *C_diag_j;
hypre_CSRMatrix *C_offd;
HYPRE_Complex *C_offd_data=NULL;
HYPRE_Int *C_offd_i=NULL;
HYPRE_Int *C_offd_j=NULL;
HYPRE_Int C_diag_size;
HYPRE_Int C_offd_size;
HYPRE_Int num_cols_offd_C = 0;
hypre_CSRMatrix *Bs_ext;
HYPRE_Complex *Bs_ext_data;
HYPRE_Int *Bs_ext_i;
HYPRE_Int *Bs_ext_j;
/* B_ext split into local-diag-range and off-range pieces */
HYPRE_Complex *B_ext_diag_data;
HYPRE_Int *B_ext_diag_i;
HYPRE_Int *B_ext_diag_j;
HYPRE_Int B_ext_diag_size;
HYPRE_Complex *B_ext_offd_data;
HYPRE_Int *B_ext_offd_i;
HYPRE_Int *B_ext_offd_j;
HYPRE_Int B_ext_offd_size;
HYPRE_Int n_rows_A, n_cols_A;
HYPRE_Int n_rows_B, n_cols_B;
HYPRE_Int allsquare = 0;
HYPRE_Int num_procs;
/* per-thread diag/offd counters for the manual prefix sums below */
HYPRE_Int *my_diag_array;
HYPRE_Int *my_offd_array;
HYPRE_Int max_num_threads;
HYPRE_Complex zero = 0.0;
n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B);
n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
max_num_threads = hypre_NumThreads();
my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads);
my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads);
if (n_cols_A != n_rows_B || num_cols_diag_A != num_rows_diag_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
return NULL;
}
/* allsquare forces an explicit (possibly zero) diagonal entry per row of C */
if ( num_rows_diag_A==num_cols_diag_B) allsquare = 1;
/*-----------------------------------------------------------------------
* Extract B_ext, i.e. portion of B that is stored on neighbor procs
* and needed locally for matrix matrix product
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm, &num_procs);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
if (num_procs > 1)
{
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings within
* hypre_ParCSRMatrixExtractBExt
*--------------------------------------------------------------------*/
Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1);
Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
Bs_ext_j = hypre_CSRMatrixJ(Bs_ext);
}
/* NOTE(review): when num_procs == 1 the Bs_ext_* pointers stay
 * uninitialized; the loops below only touch them for rows < num_cols_offd_A,
 * which is presumably 0 in the single-process case — confirm. */
B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1);
B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1);
B_ext_diag_size = 0;
B_ext_offd_size = 0;
last_col_diag_B = first_col_diag_B + num_cols_diag_B -1;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* Concurrent path: collect distinct off-range column ids in a lock-free
 * hopscotch hash set, then sort once at the end. */
hypre_UnorderedIntSet set;
#pragma omp parallel
{
HYPRE_Int size, rest, ii;
HYPRE_Int ns, ne;
HYPRE_Int i1, i, j;
HYPRE_Int my_offd_size, my_diag_size;
HYPRE_Int cnt_offd, cnt_diag;
HYPRE_Int num_threads = hypre_NumActiveThreads();
/* Block-partition rows [0, num_cols_offd_A) over threads; the first
 * `rest` threads get one extra row so the split is exact. */
size = num_cols_offd_A/num_threads;
rest = num_cols_offd_A - size*num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/* Pass 1 over my rows: count diag/offd entries, storing per-row
 * *local* offsets; converted to global offsets after the barrier. */
my_diag_size = 0;
my_offd_size = 0;
for (i=ns; i < ne; i++)
{
B_ext_diag_i[i] = my_diag_size;
B_ext_offd_i[i] = my_offd_size;
for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
my_offd_size++;
else
my_diag_size++;
}
my_diag_array[ii] = my_diag_size;
my_offd_array[ii] = my_offd_size;
#pragma omp barrier
/* Manual exclusive prefix sum across threads: each thread shifts its
 * row pointers by the totals of all lower-numbered threads. */
if (ii)
{
my_diag_size = my_diag_array[0];
my_offd_size = my_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
my_diag_size += my_diag_array[i1];
my_offd_size += my_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
B_ext_diag_i[i1] += my_diag_size;
B_ext_offd_i[i1] += my_offd_size;
}
}
else
{
/* Thread 0 computes the global totals and allocates the j/data arrays
 * for everyone (results published by the barrier below). */
B_ext_diag_size = 0;
B_ext_offd_size = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
B_ext_diag_size += my_diag_array[i1];
B_ext_offd_size += my_offd_array[i1];
}
B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;
if (B_ext_diag_size)
{
B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size);
B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size);
}
if (B_ext_offd_size)
{
B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size);
B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size);
}
hypre_UnorderedIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads());
}
#pragma omp barrier
/* Pass 2: scatter entries into the split arrays; off-range global column
 * ids also go into the concurrent set for later renumbering. */
cnt_offd = B_ext_offd_i[ns];
cnt_diag = B_ext_diag_i[ns];
for (i=ns; i < ne; i++)
{
for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
{
hypre_UnorderedIntSetPut(&set, Bs_ext_j[j]);
B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
}
else
{
B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
}
}
/* B's own offd column map must also appear in col_map_offd_C. */
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B);
for (i = i_begin; i < i_end; i++)
{
hypre_UnorderedIntSetPut(&set, col_map_offd_B[i]);
}
} /* omp parallel */
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Bs_ext);
Bs_ext = NULL;
}
/* Sort the collected global ids and build global->local inverse map. */
col_map_offd_C = hypre_UnorderedIntSetCopyToArray(&set, &num_cols_offd_C);
hypre_UnorderedIntSetDestroy(&set);
hypre_UnorderedIntMap col_map_offd_C_inverse;
hypre_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse);
HYPRE_Int i, j;
/* Renumber B_ext_offd_j from global column ids to local C offd indices. */
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_A; i++)
for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]);
if (num_cols_offd_C)
{
hypre_UnorderedIntMapDestroy(&col_map_offd_C_inverse);
}
hypre_TFree(my_diag_array);
hypre_TFree(my_offd_array);
if (num_cols_offd_B)
{
/* map_B_to_C[k] = position of B's k-th offd column within col_map_offd_C.
 * Both maps are sorted, so each thread seeds its cursor with a binary
 * search (hypre_LowerBound) and then advances linearly. */
HYPRE_Int i;
map_B_to_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_B);
#pragma omp parallel private(i)
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C);
HYPRE_Int cnt;
if (i_end > i_begin)
{
cnt = hypre_LowerBound(col_map_offd_B, col_map_offd_B + num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B;
}
for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++)
{
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
}
}
}
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
/* Fallback path: gather off-range ids into `temp`, qsort + unique them
 * serially on thread 0, then binary-search to renumber. */
HYPRE_Int *temp;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int size, rest, ii;
HYPRE_Int ns, ne;
HYPRE_Int i1, i, j;
HYPRE_Int my_offd_size, my_diag_size;
HYPRE_Int cnt_offd, cnt_diag;
HYPRE_Int num_threads = hypre_NumActiveThreads();
/* Same block partition as the hopscotch path above. */
size = num_cols_offd_A/num_threads;
rest = num_cols_offd_A - size*num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/* Pass 1: per-thread diag/offd counts with thread-local row offsets. */
my_diag_size = 0;
my_offd_size = 0;
for (i=ns; i < ne; i++)
{
B_ext_diag_i[i] = my_diag_size;
B_ext_offd_i[i] = my_offd_size;
for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
my_offd_size++;
else
my_diag_size++;
}
my_diag_array[ii] = my_diag_size;
my_offd_array[ii] = my_offd_size;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Manual prefix sum: shift this thread's row pointers by lower threads. */
if (ii)
{
my_diag_size = my_diag_array[0];
my_offd_size = my_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
my_diag_size += my_diag_array[i1];
my_offd_size += my_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
B_ext_diag_i[i1] += my_diag_size;
B_ext_offd_i[i1] += my_offd_size;
}
}
else
{
/* Thread 0: totals + shared allocations (published by the barrier). */
B_ext_diag_size = 0;
B_ext_offd_size = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
B_ext_diag_size += my_diag_array[i1];
B_ext_offd_size += my_offd_array[i1];
}
B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;
if (B_ext_diag_size)
{
B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size);
B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size);
}
if (B_ext_offd_size)
{
B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size);
B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size);
}
if (B_ext_offd_size || num_cols_offd_B)
temp = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size+num_cols_offd_B);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Pass 2: scatter; off-range ids also copied into temp for dedup. */
cnt_offd = B_ext_offd_i[ns];
cnt_diag = B_ext_diag_i[ns];
for (i=ns; i < ne; i++)
{
for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
{
temp[cnt_offd] = Bs_ext_j[j];
B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
}
else
{
B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii == 0)
{
/* Serial on thread 0: append B's offd map, sort, and unique in place
 * to produce col_map_offd_C. */
HYPRE_Int cnt;
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Bs_ext);
Bs_ext = NULL;
}
cnt = 0;
if (B_ext_offd_size || num_cols_offd_B)
{
cnt = B_ext_offd_size;
for (i=0; i < num_cols_offd_B; i++)
temp[cnt++] = col_map_offd_B[i];
if (cnt)
{
HYPRE_Int value;
hypre_qsort0(temp, 0, cnt-1);
num_cols_offd_C = 1;
value = temp[0];
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_C++] = value;
}
}
}
if (num_cols_offd_C)
col_map_offd_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_C);
for (i=0; i < num_cols_offd_C; i++)
col_map_offd_C[i] = temp[i];
hypre_TFree(temp);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Renumber B_ext_offd_j from global ids to local C offd indices. */
for (i=ns; i < ne; i++)
for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
B_ext_offd_j[j] = hypre_BinarySearch(col_map_offd_C, B_ext_offd_j[j],
num_cols_offd_C);
} /* end parallel region */
hypre_TFree(my_diag_array);
hypre_TFree(my_offd_array);
if (num_cols_offd_B)
{
/* map_B_to_C: sorted-merge lookup of B's offd columns inside C's map. */
HYPRE_Int i, cnt;
map_B_to_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_B);
cnt = 0;
for (i=0; i < num_cols_offd_C; i++)
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
if (cnt == num_cols_offd_B) break;
}
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
/* First pass: compute C's row pointers and total nonzero counts. */
hypre_ParMatmul_RowSizes(
/*&C_diag_i, &C_offd_i, &B_marker,*/
&C_diag_i, &C_offd_i,
A_diag_i, A_diag_j, A_offd_i, A_offd_j,
B_diag_i, B_diag_j, B_offd_i, B_offd_j,
B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j,
map_B_to_C,
&C_diag_size, &C_offd_size,
num_rows_diag_A, num_cols_offd_A, allsquare,
num_cols_diag_B, num_cols_offd_B,
num_cols_offd_C
);
/*-----------------------------------------------------------------------
* Allocate C_diag_data and C_diag_j arrays.
* Allocate C_offd_data and C_offd_j arrays.
*-----------------------------------------------------------------------*/
last_col_diag_B = first_col_diag_B + num_cols_diag_B - 1;
C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size);
C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size);
if (C_offd_size)
{
C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size);
C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size);
}
/*-----------------------------------------------------------------------
* Second Pass: Fill in C_diag_data and C_diag_j.
* Second Pass: Fill in C_offd_data and C_offd_j.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
/* B_marker[i3] remembers where column i3 of the current row of C was
 * written; "B_marker[i3] < jj_row_begin_*" means "not yet seen in this
 * row".  Indices < num_cols_diag_B are diag columns; indices >= are
 * offd columns shifted by num_cols_diag_B. */
HYPRE_Int *B_marker = NULL;
HYPRE_Int ns, ne, size, rest, ii;
HYPRE_Int i1, i2, i3, jj2, jj3;
HYPRE_Int jj_row_begin_diag, jj_count_diag;
HYPRE_Int jj_row_begin_offd, jj_count_offd;
HYPRE_Int num_threads;
HYPRE_Complex a_entry; /*, a_b_product;*/
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
/* Same rows-per-thread block partition as in the first pass, so the
 * jj counters line up with the row pointers computed there. */
size = num_rows_diag_A/num_threads;
rest = num_rows_diag_A - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
jj_count_diag = C_diag_i[ns];
jj_count_offd = C_offd_i[ns];
if (num_cols_diag_B || num_cols_offd_C)
B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C);
for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++)
B_marker[i1] = -1;
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (i1 = ns; i1 < ne; i1++)
{
/*--------------------------------------------------------------------
* Create diagonal entry, C_{i1,i1}
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if ( allsquare )
{
B_marker[i1] = jj_count_diag;
C_diag_data[jj_count_diag] = zero;
C_diag_j[jj_count_diag] = i1;
jj_count_diag++;
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
a_entry = A_offd_data[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_ext.
*-----------------------------------------------------------*/
for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
{
i3 = num_cols_diag_B+B_ext_offd_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{i1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3];
C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
jj_count_offd++;
}
else
C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3];
}
for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
{
i3 = B_ext_diag_j[jj3];
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3];
C_diag_j[jj_count_diag] = i3;
jj_count_diag++;
}
else
C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3];
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
a_entry = A_diag_data[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_diag.
*-----------------------------------------------------------*/
for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
{
i3 = B_diag_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{i1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3];
C_diag_j[jj_count_diag] = i3;
jj_count_diag++;
}
else
{
C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3];
}
}
if (num_cols_offd_B)
{
for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
{
i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];
/*--------------------------------------------------------
* Check B_marker to see that C_{i1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3];
C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
jj_count_offd++;
}
else
{
C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3];
}
}
}
}
}
hypre_TFree(B_marker);
} /*end parallel region */
/* Assemble the ParCSR wrapper; the data arrays are handed over directly. */
C = hypre_ParCSRMatrixCreate(comm, n_rows_A, n_cols_B, row_starts_A,
col_starts_B, num_cols_offd_C,
C_diag_size, C_offd_size);
/* Note that C does not own the partitionings */
hypre_ParCSRMatrixSetRowStartsOwner(C,0);
hypre_ParCSRMatrixSetColStartsOwner(C,0);
C_diag = hypre_ParCSRMatrixDiag(C);
hypre_CSRMatrixData(C_diag) = C_diag_data;
hypre_CSRMatrixI(C_diag) = C_diag_i;
hypre_CSRMatrixJ(C_diag) = C_diag_j;
C_offd = hypre_ParCSRMatrixOffd(C);
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_ParCSRMatrixOffd(C) = C_offd;
if (num_cols_offd_C)
{
hypre_CSRMatrixData(C_offd) = C_offd_data;
hypre_CSRMatrixJ(C_offd) = C_offd_j;
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
}
/*-----------------------------------------------------------------------
* Free various arrays
*-----------------------------------------------------------------------*/
hypre_TFree(B_ext_diag_i);
if (B_ext_diag_size)
{
hypre_TFree(B_ext_diag_j);
hypre_TFree(B_ext_diag_data);
}
hypre_TFree(B_ext_offd_i);
if (B_ext_offd_size)
{
hypre_TFree(B_ext_offd_j);
hypre_TFree(B_ext_offd_data);
}
if (num_cols_offd_B) hypre_TFree(map_B_to_C);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime();
#endif
return C;
}
/* The following function was formerly part of hypre_ParCSRMatrixExtractBExt
but the code was removed so it can be used for a corresponding function
for Boolean matrices
JSP: to allow communication overlapping, it returns comm_handle_idx and
comm_handle_data. Before accessing B, they should be destroyed (including
send_data contained in the comm_handle).
*/
void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
HYPRE_Int ** pB_ext_i,
HYPRE_Int ** pB_ext_j,
HYPRE_Complex ** pB_ext_data,
HYPRE_Int ** pB_ext_row_map,
HYPRE_Int * num_nonzeros,
HYPRE_Int data,
HYPRE_Int find_row_map,
MPI_Comm comm,
hypre_ParCSRCommPkg * comm_pkg,
HYPRE_Int num_cols_B,
HYPRE_Int num_recvs,
HYPRE_Int num_sends,
HYPRE_Int first_col_diag,
HYPRE_Int * row_starts,
HYPRE_Int * recv_vec_starts,
HYPRE_Int * send_map_starts,
HYPRE_Int * send_map_elmts,
HYPRE_Int * diag_i,
HYPRE_Int * diag_j,
HYPRE_Int * offd_i,
HYPRE_Int * offd_j,
HYPRE_Int * col_map_offd,
HYPRE_Real * diag_data,
HYPRE_Real * offd_data,
hypre_ParCSRCommHandle **comm_handle_idx,
hypre_ParCSRCommHandle **comm_handle_data,
HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
HYPRE_Int skip_fine, /* 1 if only coarse points are needed */
HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */
// extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix
// other interpolation: skip_fine = 0, skip_same_sign = 0
)
{
/*
 * Packs the locally-owned rows of B requested by neighbor processes
 * (per comm_pkg's send maps) into B_int_i/j/data, initiates the
 * asynchronous exchanges, and allocates/returns the receive-side arrays
 * *pB_ext_i/j/data (global column ids in B_ext_j).
 *
 * Outputs: *pB_ext_i is completed here; *pB_ext_j and (if data) *pB_ext_data
 * are filled only after the caller destroys *comm_handle_idx /
 * *comm_handle_data — the handles are returned so the caller can overlap
 * communication with other work.  The send buffers (B_int_j/B_int_data)
 * are still referenced by those handles; the caller frees them after
 * destroying the handles.  If find_row_map, *pB_ext_row_map receives the
 * global row index of each extracted row.
 *
 * skip_fine / skip_same_sign filter which entries of each sent row are
 * packed (used by long-range interpolation); see the parameter comments
 * above.
 */
hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
hypre_ParCSRCommPkg *tmp_comm_pkg;
HYPRE_Int *B_int_i;
HYPRE_Int *B_int_j;
HYPRE_Int *B_ext_i;
HYPRE_Int * B_ext_j;
HYPRE_Complex * B_ext_data;
HYPRE_Complex * B_int_data;
HYPRE_Int * B_int_row_map;
HYPRE_Int * B_ext_row_map;
HYPRE_Int num_procs, my_id;
HYPRE_Int *jdata_recv_vec_starts;
HYPRE_Int *jdata_send_map_starts;
HYPRE_Int i, j, k;
HYPRE_Int start_index;
/*HYPRE_Int jrow;*/
HYPRE_Int num_rows_B_ext;
HYPRE_Int *prefix_sum_workspace;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int first_row_index = row_starts[0];
#else
HYPRE_Int first_row_index = row_starts[my_id];
HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
#endif
num_rows_B_ext = recv_vec_starts[num_recvs];
if ( num_rows_B_ext < 0 ) { /* no B_ext, no communication */
*pB_ext_i = NULL;
*pB_ext_j = NULL;
if ( data ) *pB_ext_data = NULL;
if ( find_row_map ) *pB_ext_row_map = NULL;
*num_nonzeros = 0;
return;
};
B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1);
B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1);
*pB_ext_i = B_ext_i;
if ( find_row_map ) {
B_int_row_map = hypre_CTAlloc( HYPRE_Int, send_map_starts[num_sends]+1 );
B_ext_row_map = hypre_CTAlloc( HYPRE_Int, num_rows_B_ext+1 );
*pB_ext_row_map = B_ext_row_map;
};
/*--------------------------------------------------------------------------
* generate B_int_i through adding number of row-elements of offd and diag
* for corresponding rows. B_int_i[j+1] contains the number of elements of
* a row j (which is determined through send_map_elmts)
*--------------------------------------------------------------------------*/
jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1);
jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1);
jdata_send_map_starts[0] = B_int_i[0] = 0;
/*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,k)
#endif
{
/* Phase 1 (per thread): count how many entries of each row assigned to
 * this thread survive the skip_fine/skip_same_sign filters; counts[i]
 * is this thread's total for send target i. */
/*HYPRE_Int counts[num_sends];*/
HYPRE_Int *counts;
counts = hypre_TAlloc(HYPRE_Int, num_sends);
for (i=0; i < num_sends; i++)
{
HYPRE_Int j_begin, j_end;
hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
j_begin += send_map_starts[i];
j_end += send_map_starts[i];
HYPRE_Int count = 0;
if (skip_fine && skip_same_sign)
{
/* Keep only entries whose sign is OPPOSITE to the row's first diag
 * entry (the diagonal) and that point to coarse points. */
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int send_proc = send_procs[i];
HYPRE_Int send_proc_first_row = row_starts[send_proc];
HYPRE_Int send_proc_last_row = row_starts[send_proc + 1];
#endif
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = 0;
if (diag_data[diag_i[jrow]] >= 0)
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++;
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (offd_data[k] < 0) len++;
#else
HYPRE_Int c = offd_j[k];
HYPRE_Int c_global = col_map_offd[c];
if (offd_data[k] < 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row))) len++;
#endif
}
}
else
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++;
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (offd_data[k] > 0) len++;
#else
HYPRE_Int c = offd_j[k];
HYPRE_Int c_global = col_map_offd[c];
if (offd_data[k] > 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row))) len++;
#endif
}
}
B_int_i[j + 1] = len;
count += len;
}
}
else if (skip_fine)
{
/* Keep only entries pointing to coarse points (CF_marker >= 0). */
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = 0;
for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
{
if (CF_marker[diag_j[k]] >= 0) len++;
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (CF_marker_offd[offd_j[k]] >= 0) len++;
}
B_int_i[j + 1] = len;
count += len;
}
}
else
{
/* No filtering: full row length (diag + offd). */
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
len += offd_i[jrow + 1] - offd_i[jrow];
B_int_i[j + 1] = len;
count += len;
}
}
if (find_row_map)
{
/* Record the GLOBAL row index of each packed row. */
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
B_int_row_map[j] = jrow + first_row_index;
}
}
counts[i] = count;
}
/* Cross-thread prefix sum turns per-thread counts into each thread's
 * write offset; jdata_send_map_starts[1..] gets per-target totals. */
hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);
#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
{
/* Master only: finish the send-map prefix sum, kick off the row-length
 * (and optional row-map) exchange, and allocate the send buffers. */
for (i = 1; i < num_sends; i++)
{
jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
}
/*--------------------------------------------------------------------------
* initialize communication
*--------------------------------------------------------------------------*/
comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,
&B_int_i[1],&(B_ext_i[1]) );
if ( find_row_map )
{
/* scatter/gather B_int row numbers to form array of B_ext row numbers */
row_map_comm_handle = hypre_ParCSRCommHandleCreate
(11,comm_pkg, B_int_row_map, B_ext_row_map );
}
B_int_j = hypre_TAlloc(HYPRE_Int, jdata_send_map_starts[num_sends]);
if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends]);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Phase 2 (per thread): pack the filtered entries into B_int_j/B_int_data
 * at the offsets computed above; the filter logic mirrors phase 1 exactly
 * so the counts and writes stay in sync. */
for (i=0; i < num_sends; i++)
{
HYPRE_Int j_begin, j_end;
hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
j_begin += send_map_starts[i];
j_end += send_map_starts[i];
HYPRE_Int count = counts[i] + jdata_send_map_starts[i];
if (data)
{
if (skip_same_sign && skip_fine)
{
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int send_proc = send_procs[i];
HYPRE_Int send_proc_first_row = row_starts[send_proc];
HYPRE_Int send_proc_last_row = row_starts[send_proc + 1];
#endif
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
/*HYPRE_Int count_begin = count;*/
if (diag_data[diag_i[jrow]] >= 0)
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = diag_j[k]+first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
HYPRE_Int c = offd_j[k];
HYPRE_Int c_global = col_map_offd[c];
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (offd_data[k] < 0)
#else
if (offd_data[k] < 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row)))
#endif
{
B_int_j[count] = c_global;
B_int_data[count] = offd_data[k];
count++;
}
}
}
else
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = diag_j[k]+first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
HYPRE_Int c = offd_j[k];
HYPRE_Int c_global = col_map_offd[c];
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (offd_data[k] > 0)
#else
if (offd_data[k] > 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row)))
#endif
{
B_int_j[count] = c_global;
B_int_data[count] = offd_data[k];
count++;
}
}
}
}
}
else
{
for (j = j_begin; j < j_end; ++j) {
HYPRE_Int jrow = send_map_elmts[j];
for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++)
{
B_int_j[count] = diag_j[k]+first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++)
{
B_int_j[count] = col_map_offd[offd_j[k]];
B_int_data[count] = offd_data[k];
count++;
}
}
}
} // data
else
{
/* Indices only (no values requested). */
if (skip_fine)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
{
if (CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = diag_j[k] + first_col_diag;
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (CF_marker_offd[offd_j[k]] >= 0)
{
B_int_j[count] = col_map_offd[offd_j[k]];
count++;
}
}
}
}
else
{
for (j = j_begin; j < j_end; ++j) {
HYPRE_Int jrow = send_map_elmts[j];
for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++)
{
B_int_j[count] = diag_j[k]+first_col_diag;
count++;
}
for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++)
{
B_int_j[count] = col_map_offd[offd_j[k]];
count++;
}
}
}
} // !data
} /* for each send target */
hypre_TFree(counts);
} /* omp parallel. JSP: this takes most of time in this function */
hypre_TFree(prefix_sum_workspace);
/* Build a temporary comm package whose send/recv starts describe the
 * variable-length j/data payloads (same procs as comm_pkg). */
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) =
hypre_ParCSRCommPkgSendProcs(comm_pkg);
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) =
hypre_ParCSRCommPkgRecvProcs(comm_pkg);
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;
/* Completes the row-length exchange; B_ext_i[1..] now holds row lengths. */
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
/*--------------------------------------------------------------------------
* after communication exchange B_ext_i[j+1] contains the number of elements
* of a row j !
* evaluate B_ext_i and compute *num_nonzeros for B_ext
*--------------------------------------------------------------------------*/
for (i=0; i < num_recvs; i++)
for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
B_ext_i[j+1] += B_ext_i[j];
*num_nonzeros = B_ext_i[num_rows_B_ext];
*pB_ext_j = hypre_TAlloc(HYPRE_Int, *num_nonzeros);
B_ext_j = *pB_ext_j;
if (data) {
*pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros);
B_ext_data = *pB_ext_data;
};
for (i=0; i < num_recvs; i++)
{
start_index = B_ext_i[recv_vec_starts[i]];
*num_nonzeros = B_ext_i[recv_vec_starts[i+1]]-start_index;
jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
}
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;
/* Launch the payload exchanges; handles are RETURNED, not completed here,
 * so the caller can overlap communication with computation. */
*comm_handle_idx = hypre_ParCSRCommHandleCreate(11,tmp_comm_pkg,B_int_j,B_ext_j);
if (data)
{
*comm_handle_data = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,B_int_data,
B_ext_data);
}
if (row_map_comm_handle)
{
hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
row_map_comm_handle = NULL;
}
hypre_TFree(jdata_send_map_starts);
hypre_TFree(jdata_recv_vec_starts);
hypre_TFree(tmp_comm_pkg);
hypre_TFree(B_int_i);
if ( find_row_map ) hypre_TFree(B_int_row_map);
/* end generic part */
}
/* Non-overlapping front end for hypre_ParCSRMatrixExtractBExt_Arrays_Overlap:
 * performs the extraction, then immediately blocks on both outstanding
 * exchanges and releases their send buffers before returning.  All output
 * arrays (*pB_ext_i, *pB_ext_j, *pB_ext_data, *pB_ext_row_map) are owned by
 * the caller. */
void hypre_ParCSRMatrixExtractBExt_Arrays(
   HYPRE_Int ** pB_ext_i,
   HYPRE_Int ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_Int ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_Int first_col_diag,
   HYPRE_Int * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_Int * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data
   )
{
   hypre_ParCSRCommHandle *idx_handle  = NULL;
   hypre_ParCSRCommHandle *data_handle = NULL;
   HYPRE_Int              *idx_send_buf;

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
      data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
      first_col_diag, row_starts, recv_vec_starts, send_map_starts,
      send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data,
      &idx_handle, &data_handle,
      NULL, NULL,
      0, 0);

   /* wait for the column-index exchange to finish and free its send buffer */
   idx_send_buf = (HYPRE_Int *) idx_handle->send_data;
   hypre_ParCSRCommHandleDestroy(idx_handle);
   hypre_TFree(idx_send_buf);

   if (data)
   {
      /* wait for the matrix-value exchange to finish and free its buffer */
      HYPRE_Real *data_send_buf = (HYPRE_Real *) data_handle->send_data;
      hypre_ParCSRCommHandleDestroy(data_handle);
      hypre_TFree(data_send_buf);
   }
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on
* other processors and needed for multiplication with A locally. The rows
* are returned as CSRMatrix.
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B,
                                       hypre_ParCSRMatrix *A,
                                       HYPRE_Int data,
                                       hypre_ParCSRCommHandle **comm_handle_idx,
                                       hypre_ParCSRCommHandle **comm_handle_data,
                                       HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
                                       HYPRE_Int skip_fine, HYPRE_Int skip_same_sign )
{
   /* Extracts the rows of B that live on other processors but are needed
    * for a local multiplication with A, returning them as a CSRMatrix.
    * Communication is left in flight: the caller must complete and destroy
    * *comm_handle_idx (and *comm_handle_data when data != 0), then free the
    * corresponding send buffers. */
   MPI_Comm             comm           = hypre_ParCSRMatrixComm(B);
   HYPRE_Int            first_col_diag = hypre_ParCSRMatrixFirstColDiag(B);
   HYPRE_Int           *col_map_offd   = hypre_ParCSRMatrixColMapOffd(B);
   hypre_CSRMatrix     *B_diag         = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix     *B_offd         = hypre_ParCSRMatrixOffd(B);
   hypre_ParCSRCommPkg *comm_pkg;
   hypre_CSRMatrix     *B_ext;
   HYPRE_Int           *B_ext_i;
   HYPRE_Int           *B_ext_j;
   HYPRE_Complex       *B_ext_data;
   HYPRE_Int           *row_map_dummy;   /* row map is not requested here */
   HYPRE_Int            num_cols_B, num_nonzeros, num_rows_B_ext;
   HYPRE_Int            num_recvs, num_sends;
   HYPRE_Int           *recv_vec_starts;
   HYPRE_Int           *send_map_starts;
   HYPRE_Int           *send_map_elmts;

   /* A's CommPkg describes which rows of B are needed locally; if it does
    * not exist yet, generate one using equally load balanced partitionings. */
   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }
   comm_pkg        = hypre_ParCSRMatrixCommPkg(A);
   num_recvs       = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
   num_sends       = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
   send_map_elmts  = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

   num_cols_B     = hypre_ParCSRMatrixGlobalNumCols(B);
   num_rows_B_ext = recv_vec_starts[num_recvs];

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap
      ( &B_ext_i, &B_ext_j, &B_ext_data, &row_map_dummy,
        &num_nonzeros,
        data, 0, comm, comm_pkg,
        num_cols_B, num_recvs, num_sends,
        first_col_diag, B->row_starts,
        recv_vec_starts, send_map_starts, send_map_elmts,
        hypre_CSRMatrixI(B_diag), hypre_CSRMatrixJ(B_diag),
        hypre_CSRMatrixI(B_offd), hypre_CSRMatrixJ(B_offd),
        col_map_offd,
        hypre_CSRMatrixData(B_diag), hypre_CSRMatrixData(B_offd),
        comm_handle_idx, comm_handle_data,
        CF_marker, CF_marker_offd,
        skip_fine, skip_same_sign
      );

   /* wrap the extracted arrays in a CSR matrix owned by the caller */
   B_ext = hypre_CSRMatrixCreate(num_rows_B_ext, num_cols_B, num_nonzeros);
   hypre_CSRMatrixI(B_ext) = B_ext_i;
   hypre_CSRMatrixJ(B_ext) = B_ext_j;
   if (data) hypre_CSRMatrixData(B_ext) = B_ext_data;

   return B_ext;
}
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B,
                               hypre_ParCSRMatrix *A,
                               HYPRE_Int data )
{
   /* Non-overlapping variant of hypre_ParCSRMatrixExtractBExt_Overlap:
    * extract, then complete the pending exchanges immediately and free
    * their send buffers.  The returned CSR matrix is owned by the caller. */
   hypre_ParCSRCommHandle *idx_handle;
   hypre_ParCSRCommHandle *data_handle;
   hypre_CSRMatrix        *B_ext;
   HYPRE_Int              *idx_send_buf;

   B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, data,
                                                 &idx_handle, &data_handle,
                                                 NULL, NULL, 0, 0);

   /* finish the column-index exchange */
   idx_send_buf = (HYPRE_Int *) idx_handle->send_data;
   hypre_ParCSRCommHandleDestroy(idx_handle);
   hypre_TFree(idx_send_buf);

   if (data)
   {
      /* finish the matrix-value exchange */
      HYPRE_Real *data_send_buf = (HYPRE_Real *) data_handle->send_data;
      hypre_ParCSRCommHandleDestroy(data_handle);
      hypre_TFree(data_send_buf);
   }

   return B_ext;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixTranspose
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix **AT_ptr,
                             HYPRE_Int data )
{
   /* Computes the parallel transpose AT = A^T and returns it through AT_ptr
    * (caller owns the result).  If data == 0 only the sparsity pattern is
    * transposed.
    *
    * Strategy: transpose A_offd locally (its rows are then indexed by A's
    * off-processor columns), ship those rows back to their owning processors
    * using A's CommPkg run in the reverse direction, transpose A_diag
    * locally, and assemble the received entries into AT's offd part.
    * Always returns ierr == 0. */
   hypre_ParCSRCommHandle *comm_handle;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A);
   HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, num_recvs, num_cols_offd_AT;
   HYPRE_Int i, j, k, index, counter, j_row;
   HYPRE_Int value;
   hypre_ParCSRMatrix *AT;
   hypre_CSRMatrix *AT_diag;
   hypre_CSRMatrix *AT_offd;
   hypre_CSRMatrix *AT_tmp;
   HYPRE_Int first_row_index_AT, first_col_diag_AT;
   HYPRE_Int local_num_rows_AT, local_num_cols_AT;
   HYPRE_Int *AT_tmp_i;
   HYPRE_Int *AT_tmp_j;
   HYPRE_Complex *AT_tmp_data;
   HYPRE_Int *AT_buf_i;
   HYPRE_Int *AT_buf_j;
   HYPRE_Complex *AT_buf_data;
   HYPRE_Int *AT_offd_i;
   HYPRE_Int *AT_offd_j;
   HYPRE_Complex *AT_offd_data;
   HYPRE_Int *col_map_offd_AT;
   HYPRE_Int *row_starts_AT;
   HYPRE_Int *col_starts_AT;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *recv_procs;
   HYPRE_Int *send_procs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;
   HYPRE_Int *tmp_recv_vec_starts;
   HYPRE_Int *tmp_send_map_starts;
   hypre_ParCSRCommPkg *tmp_comm_pkg;
   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_cols_offd_AT = 0;
   counter = 0;
   AT_offd_j = NULL;
   AT_offd_data = NULL;
   col_map_offd_AT = NULL;
   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   if (num_procs > 1)
   {
      /* Transpose the off-diagonal part locally.  AT_tmp then has
       * num_cols_offd rows; its column indices are local row numbers of A. */
      hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data);
      AT_tmp_i = hypre_CSRMatrixI(AT_tmp);
      AT_tmp_j = hypre_CSRMatrixJ(AT_tmp);
      if (data) AT_tmp_data = hypre_CSRMatrixData(AT_tmp);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
      recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
      send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
      AT_buf_i = hypre_CTAlloc(HYPRE_Int,send_map_starts[num_sends]);
      /* convert AT_tmp's column indices (local rows of A) to global row
       * numbers, since they will be interpreted on other processors */
      for (i=0; i < AT_tmp_i[num_cols_offd]; i++)
         AT_tmp_j[i] += first_row_index;
      /* destructively turn AT_tmp_i from a row-pointer array into an array
       * of per-row lengths, then start exchanging those lengths
       * (job 12 presumably runs the CommPkg pattern in reverse -- see
       * hypre_ParCSRCommHandleCreate for the job-code semantics) */
      for (i=0; i < num_cols_offd; i++)
         AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i];
      comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i);
   }
   /* the diagonal block transposes entirely locally; overlap it with the
    * row-length exchange started above */
   hypre_CSRMatrixTranspose( A_diag, &AT_diag, data);
   AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1);
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      /* Build a temporary CommPkg whose send/recv extents are measured in
       * matrix entries (per received row length) instead of rows. */
      tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int,num_sends+1);
      tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int,num_recvs+1);
      tmp_send_map_starts[0] = send_map_starts[0];
      for (i=0; i < num_sends; i++)
      {
         tmp_send_map_starts[i+1] = tmp_send_map_starts[i];
         for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            tmp_send_map_starts[i+1] += AT_buf_i[j];
            /* accumulate received row lengths into AT_offd's row counts */
            AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j];
         }
      }
      /* prefix-sum the counts into a row-pointer array */
      for (i=0; i < num_cols; i++)
         AT_offd_i[i+1] += AT_offd_i[i];
      tmp_recv_vec_starts[0] = recv_vec_starts[0];
      for (i=0; i < num_recvs; i++)
      {
         tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i];
         for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
         {
            tmp_recv_vec_starts[i+1] += AT_tmp_i[j];
         }
      }
      tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
      hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
      hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
      hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
      hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
      hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
      hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts;
      hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts;
      /* exchange the (already globalized) column indices ... */
      AT_buf_j = hypre_CTAlloc(HYPRE_Int,tmp_send_map_starts[num_sends]);
      comm_handle = hypre_ParCSRCommHandleCreate(12, tmp_comm_pkg, AT_tmp_j,
                                                 AT_buf_j);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      if (data)
      {
         /* ... and the matrix values (job 2: complex data, reverse
          * direction -- see hypre_ParCSRCommHandleCreate) */
         AT_buf_data = hypre_CTAlloc(HYPRE_Complex,tmp_send_map_starts[num_sends]);
         comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data,
                                                    AT_buf_data);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         comm_handle = NULL;
      }
      /* tmp_comm_pkg borrowed recv_procs/send_procs from comm_pkg, so only
       * the arrays allocated here are freed */
      hypre_TFree(tmp_recv_vec_starts);
      hypre_TFree(tmp_send_map_starts);
      hypre_TFree(tmp_comm_pkg);
      hypre_CSRMatrixDestroy(AT_tmp);
      if (AT_offd_i[num_cols])
      {
         AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols]);
         if (data) AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols]);
      }
      else
      {
         AT_offd_j = NULL;
         AT_offd_data = NULL;
      }
      /* scatter the received entries into AT_offd row by row; this walk
       * advances AT_offd_i[j_row] past each filled row ... */
      counter = 0;
      for (i=0; i < num_sends; i++)
      {
         for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            j_row = send_map_elmts[j];
            index = AT_offd_i[j_row];
            for (k=0; k < AT_buf_i[j]; k++)
            {
               if (data) AT_offd_data[index] = AT_buf_data[counter];
               AT_offd_j[index++] = AT_buf_j[counter++];
            }
            AT_offd_i[j_row] = index;
         }
      }
      /* ... so shift the row pointers back down by one slot to restore them */
      for (i=num_cols; i > 0; i--)
         AT_offd_i[i] = AT_offd_i[i-1];
      AT_offd_i[0] = 0;
      if (counter)
      {
         /* build col_map_offd_AT: sort the received global column indices
          * and compress out duplicates (AT_buf_j is reused in place) */
         hypre_qsort0(AT_buf_j,0,counter-1);
         num_cols_offd_AT = 1;
         value = AT_buf_j[0];
         for (i=1; i < counter; i++)
         {
            if (value < AT_buf_j[i])
            {
               AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i];
               value = AT_buf_j[i];
            }
         }
      }
      if (num_cols_offd_AT)
         col_map_offd_AT = hypre_CTAlloc(HYPRE_Int, num_cols_offd_AT);
      else
         col_map_offd_AT = NULL;
      for (i=0; i < num_cols_offd_AT; i++)
         col_map_offd_AT[i] = AT_buf_j[i];
      hypre_TFree(AT_buf_i);
      hypre_TFree(AT_buf_j);
      if (data) hypre_TFree(AT_buf_data);
      /* map AT_offd's global column indices back to local offd indices */
      for (i=0; i < counter; i++)
         AT_offd_j[i] = hypre_BinarySearch(col_map_offd_AT,AT_offd_j[i],
                                           num_cols_offd_AT);
   }
   AT_offd = hypre_CSRMatrixCreate(num_cols,num_cols_offd_AT,counter);
   hypre_CSRMatrixI(AT_offd) = AT_offd_i;
   hypre_CSRMatrixJ(AT_offd) = AT_offd_j;
   hypre_CSRMatrixData(AT_offd) = AT_offd_data;
   /* AT's row partitioning is A's column partitioning and vice versa */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_starts_AT = hypre_CTAlloc(HYPRE_Int, 2);
   for (i=0; i < 2; i++)
      row_starts_AT[i] = col_starts[i];
   if (row_starts != col_starts)
   {
      col_starts_AT = hypre_CTAlloc(HYPRE_Int,2);
      for (i=0; i < 2; i++)
         col_starts_AT[i] = row_starts[i];
   }
   else
   {
      col_starts_AT = row_starts_AT;
   }
   first_row_index_AT = row_starts_AT[0];
   first_col_diag_AT = col_starts_AT[0];
   local_num_rows_AT = row_starts_AT[1]-first_row_index_AT ;
   local_num_cols_AT = col_starts_AT[1]-first_col_diag_AT;
#else
   row_starts_AT = hypre_CTAlloc(HYPRE_Int,num_procs+1);
   for (i=0; i < num_procs+1; i++)
      row_starts_AT[i] = col_starts[i];
   if (row_starts != col_starts)
   {
      col_starts_AT = hypre_CTAlloc(HYPRE_Int,num_procs+1);
      for (i=0; i < num_procs+1; i++)
         col_starts_AT[i] = row_starts[i];
   }
   else
   {
      col_starts_AT = row_starts_AT;
   }
   first_row_index_AT = row_starts_AT[my_id];
   first_col_diag_AT = col_starts_AT[my_id];
   local_num_rows_AT = row_starts_AT[my_id+1]-first_row_index_AT ;
   local_num_cols_AT = col_starts_AT[my_id+1]-first_col_diag_AT;
#endif
   /* assemble the ParCSR wrapper by hand (ownership flags below say the
    * matrix owns its data and row starts; col starts only when distinct) */
   AT = hypre_CTAlloc(hypre_ParCSRMatrix,1);
   hypre_ParCSRMatrixComm(AT) = comm;
   hypre_ParCSRMatrixDiag(AT) = AT_diag;
   hypre_ParCSRMatrixOffd(AT) = AT_offd;
   hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A);
   hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A);
   hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT;
   hypre_ParCSRMatrixColStarts(AT) = col_starts_AT;
   hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT;
   hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT;
   hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT;
   hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1;
   hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1;
   hypre_ParCSRMatrixOwnsData(AT) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(AT) = 1;
   hypre_ParCSRMatrixOwnsColStarts(AT) = 1;
   if (row_starts_AT == col_starts_AT)
      hypre_ParCSRMatrixOwnsColStarts(AT) = 0;
   hypre_ParCSRMatrixCommPkg(AT) = NULL;
   hypre_ParCSRMatrixCommPkgT(AT) = NULL;
   hypre_ParCSRMatrixRowindices(AT) = NULL;
   hypre_ParCSRMatrixRowvalues(AT) = NULL;
   hypre_ParCSRMatrixGetrowactive(AT) = 0;
   *AT_ptr = AT;
   return ierr;
}
/* -----------------------------------------------------------------------------
* generate a parallel spanning tree (for Maxwell Equation)
* G_csr is the node to edge connectivity matrix
* ----------------------------------------------------------------------------- */
void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr,
                                        HYPRE_Int **indices,
                                        HYPRE_Int G_type )
{
   /* Generates a parallel spanning tree of the node-edge graph described by
    * G_csr (for the Maxwell solver).  G_type == 0 means G_csr is the
    * node-to-edge connectivity matrix; otherwise it is edge-to-node and is
    * transposed locally first.  On return, *indices is a caller-owned array
    * whose first entry is the tree size, followed by the marked edge indices. */
   HYPRE_Int nrows_G, ncols_G, *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge;
   HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node;
   HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts;
   HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j;
   HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i;
   HYPRE_Int *T_diag_j, *counts, offset;
   MPI_Comm comm;
   hypre_ParCSRCommPkg *comm_pkg;
   hypre_CSRMatrix *G_diag;
   /* fetch G matrix (G_type = 0 ==> node to edge) */
   if (G_type == 0)
   {
      nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
      ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
      G_diag = hypre_ParCSRMatrixDiag(G_csr);
      G_diag_i = hypre_CSRMatrixI(G_diag);
      G_diag_j = hypre_CSRMatrixJ(G_diag);
   }
   else
   {
      /* G_csr is edge-to-node: build the node-to-edge transpose into
       * freshly malloc'd G_diag_i / G_diag_j (freed at the end) */
      nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
      ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
      G_diag = hypre_ParCSRMatrixDiag(G_csr);
      T_diag_i = hypre_CSRMatrixI(G_diag);
      T_diag_j = hypre_CSRMatrixJ(G_diag);
      counts = (HYPRE_Int *) malloc(nrows_G * sizeof(HYPRE_Int));
      for (i = 0; i < nrows_G; i++) counts[i] = 0;
      for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++;
      G_diag_i = (HYPRE_Int *) malloc((nrows_G+1) * sizeof(HYPRE_Int));
      G_diag_j = (HYPRE_Int *) malloc(T_diag_i[ncols_G] * sizeof(HYPRE_Int));
      G_diag_i[0] = 0;
      for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
      /* the scatter below bumps each G_diag_i[k] past its row ... */
      for (i = 0; i < ncols_G; i++)
      {
         for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++)
         {
            k = T_diag_j[j];
            offset = G_diag_i[k]++;
            G_diag_j[offset] = i;
         }
      }
      /* ... so rebuild the row-pointer array from the counts */
      G_diag_i[0] = 0;
      for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
      free(counts);
   }
   /* form G transpose in special form (2 nodes per edge max):
    * GT_diag_mat[2*e] and GT_diag_mat[2*e+1] are the endpoints of edge e,
    * -1 where an endpoint is absent */
   GT_diag_mat = (HYPRE_Int *) malloc(2 * ncols_G * sizeof(HYPRE_Int));
   for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1;
   for (i = 0; i < nrows_G; i++)
   {
      for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++)
      {
         edge = G_diag_j[j];
         if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i;
         else GT_diag_mat[edge*2+1] = i;
      }
   }
   /* BFS on the local matrix graph to find tree
    * (queue is used LIFO-style: pops come from queue_tail-1, so this is
    * effectively a depth-first traversal; either way it yields a tree) */
   nodes_marked = (HYPRE_Int *) malloc(nrows_G * sizeof(HYPRE_Int));
   edges_marked = (HYPRE_Int *) malloc(ncols_G * sizeof(HYPRE_Int));
   for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0;
   for (i = 0; i < ncols_G; i++) edges_marked[i] = 0;
   queue = (HYPRE_Int *) malloc(nrows_G * sizeof(HYPRE_Int));
   queue_head = 0;
   queue_tail = 1;
   queue[0] = 0;
   nodes_marked[0] = 1;
   while ((queue_tail-queue_head) > 0)
   {
      node = queue[queue_tail-1];
      queue_tail--;
      for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++)
      {
         edge = G_diag_j[i];
         if (edges_marked[edge] == 0)
         {
            /* only follow edges that have both endpoints locally */
            if (GT_diag_mat[2*edge+1] != -1)
            {
               node2 = GT_diag_mat[2*edge];
               if (node2 == node) node2 = GT_diag_mat[2*edge+1];
               if (nodes_marked[node2] == 0)
               {
                  nodes_marked[node2] = 1;
                  edges_marked[edge] = 1;
                  queue[queue_tail] = node2;
                  queue_tail++;
               }
            }
         }
      }
   }
   free(nodes_marked);
   free(queue);
   free(GT_diag_mat);
   /* fetch the communication information from */
   comm = hypre_ParCSRMatrixComm(G_csr);
   hypre_MPI_Comm_rank(comm, &mypid);
   hypre_MPI_Comm_size(comm, &nprocs);
   comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
   /* NOTE(review): the `nprocs == 1` in this guard looks suspicious -- it
    * builds a CommPkg only in the serial case, while the code below needs
    * one when nprocs > 1.  Presumably callers guarantee a CommPkg exists
    * in parallel runs; confirm before relying on it. */
   if (nprocs == 1 && comm_pkg == NULL)
   {
      hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr);
      comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
   }
   /* construct processor graph based on node-edge connection */
   /* (local edges connected to neighbor processor nodes) */
   n_children = 0;
   nrecvs = nsends = 0;
   if (nprocs > 1)
   {
      nsends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
      nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      proc_array = NULL;
      if ((nsends+nrecvs) > 0)
      {
         /* merge send and recv neighbor lists, sort, and deduplicate */
         n_proc_array = 0;
         proc_array = (HYPRE_Int *) malloc((nsends+nrecvs) * sizeof(HYPRE_Int));
         for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i];
         for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i];
         hypre_qsort0(proc_array, 0, nsends+nrecvs-1);
         n_proc_array = 1;
         for (i = 1; i < nrecvs+nsends; i++)
            if (proc_array[i] != proc_array[n_proc_array])
               proc_array[n_proc_array++] = proc_array[i];
      }
      /* gather every processor's neighbor list into a global CSR-style
       * processor graph (pgraph_i, pgraph_j) */
      pgraph_i = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int));
      recv_cnts = (HYPRE_Int *) malloc(nprocs * sizeof(HYPRE_Int));
      hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1,
                          HYPRE_MPI_INT, comm);
      pgraph_i[0] = 0;
      for (i = 1; i <= nprocs; i++)
         pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1];
      pgraph_j = (HYPRE_Int *) malloc(pgraph_i[nprocs] * sizeof(HYPRE_Int));
      hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j,
                           recv_cnts, pgraph_i, HYPRE_MPI_INT, comm);
      free(recv_cnts);
      /* BFS on the processor graph to determine parent and children;
       * nodes_marked[p] records the processor that discovered p */
      nodes_marked = (HYPRE_Int *) malloc(nprocs * sizeof(HYPRE_Int));
      for (i = 0; i < nprocs; i++) nodes_marked[i] = -1;
      queue = (HYPRE_Int *) malloc(nprocs * sizeof(HYPRE_Int));
      queue_head = 0;
      queue_tail = 1;
      node = 0;
      queue[0] = node;
      while ((queue_tail-queue_head) > 0)
      {
         proc = queue[queue_tail-1];
         queue_tail--;
         for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++)
         {
            proc2 = pgraph_j[i];
            if (nodes_marked[proc2] < 0)
            {
               nodes_marked[proc2] = proc;
               queue[queue_tail] = proc2;
               queue_tail++;
            }
         }
      }
      parent = nodes_marked[mypid];
      n_children = 0;
      for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++;
      if (n_children == 0) {n_children = 0; children = NULL;}
      else
      {
         children = (HYPRE_Int *) malloc(n_children * sizeof(HYPRE_Int));
         n_children = 0;
         for (i = 0; i < nprocs; i++)
            if (nodes_marked[i] == mypid) children[n_children++] = i;
      }
      free(nodes_marked);
      free(queue);
      free(pgraph_i);
      free(pgraph_j);
   }
   /* first, connection with my parent : if the edge in my parent *
    * is incident to one of my nodes, then my parent will mark it *
    * (when nprocs == 1, `parent` is never set, but nrecvs/nsends are 0 so
    * neither of the loops below reads it) */
   found = 0;
   for (i = 0; i < nrecvs; i++)
   {
      proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
      if (proc == parent)
      {
         found = 1;
         break;
      }
   }
   /* but if all the edges connected to my parent are on my side, *
    * then I will just pick one of them as tree edge */
   if (found == 0)
   {
      for (i = 0; i < nsends; i++)
      {
         proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
         if (proc == parent)
         {
            k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i);
            edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k);
            edges_marked[edge] = 1;
            break;
         }
      }
   }
   /* next, if my processor has an edge incident on one node in my *
    * child, put this edge on the tree. But if there is no such    *
    * edge, then I will assume my child will pick up an edge */
   for (j = 0; j < n_children; j++)
   {
      proc = children[j];
      for (i = 0; i < nsends; i++)
      {
         proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
         if (proc == proc2)
         {
            k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i);
            edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k);
            edges_marked[edge] = 1;
            break;
         }
      }
   }
   if (n_children > 0) free(children);
   /* count the size of the tree */
   tree_size = 0;
   for (i = 0; i < ncols_G; i++)
      if (edges_marked[i] == 1) tree_size++;
   /* pack the result: t_indices[0] = tree size, then the edge indices */
   t_indices = (HYPRE_Int *) malloc((tree_size+1) * sizeof(HYPRE_Int));
   t_indices[0] = tree_size;
   tree_size = 1;
   for (i = 0; i < ncols_G; i++)
      if (edges_marked[i] == 1) t_indices[tree_size++] = i;
   (*indices) = t_indices;
   free(edges_marked);
   if (G_type != 0)
   {
      free(G_diag_i);
      free(G_diag_j);
   }
}
/* -----------------------------------------------------------------------------
* extract submatrices based on given indices
* ----------------------------------------------------------------------------- */
void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr,
                                           HYPRE_Int *indices2,
                                           hypre_ParCSRMatrix ***submatrices )
{
   /* Splits A_csr into a 2x2 block system [A11 A12; A21 A22] induced by the
    * row/column index set in indices2 (indices2[0] = count, indices2[1..]
    * = the indices; the list is sorted in place).  A11 couples the selected
    * indices with themselves, A22 the complement with itself, A12/A21 the
    * cross couplings.  Results go into (*submatrices)[0..3], caller-owned.
    * Serial only: exits with an error message if nprocs > 1. */
   HYPRE_Int nindices, *indices, nrows_A, *A_diag_i, *A_diag_j, mypid, nprocs;
   HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *itmp_array, *exp_indices;
   HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag;
   HYPRE_Int global_nrows, global_ncols, *row_starts, *col_starts, nrows, nnz;
   HYPRE_Int *diag_i, *diag_j, row, *offd_i;
   HYPRE_Complex *A_diag_a, *diag_a;
   hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr;
   hypre_CSRMatrix *A_diag, *diag, *offd;
   MPI_Comm comm;
   /* -----------------------------------------------------
    * first make sure the incoming indices are in order
    * ----------------------------------------------------- */
   nindices = indices2[0];
   indices = &(indices2[1]);
   hypre_qsort0(indices, 0, nindices-1);
   /* -----------------------------------------------------
    * fetch matrix information
    * ----------------------------------------------------- */
   nrows_A = hypre_ParCSRMatrixGlobalNumRows(A_csr);
   A_diag = hypre_ParCSRMatrixDiag(A_csr);
   A_diag_i = hypre_CSRMatrixI(A_diag);
   A_diag_j = hypre_CSRMatrixJ(A_diag);
   A_diag_a = hypre_CSRMatrixData(A_diag);
   comm = hypre_ParCSRMatrixComm(A_csr);
   hypre_MPI_Comm_rank(comm, &mypid);
   hypre_MPI_Comm_size(comm, &nprocs);
   if (nprocs > 1)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n");
      exit(1);
   }
   /* -----------------------------------------------------
    * compute new matrix dimensions
    * (proc_offsets1 = prefix sums of selected-index counts per proc,
    *  proc_offsets2 = complement counts; with nprocs == 1 these are just
    *  {0, nindices} and {0, nrows_A - nindices})
    * ----------------------------------------------------- */
   proc_offsets1 = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int));
   proc_offsets2 = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int));
   hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
                       HYPRE_MPI_INT, comm);
   k = 0;
   for (i = 0; i < nprocs; i++)
   {
      j = proc_offsets1[i];
      proc_offsets1[i] = k;
      k += j;
   }
   proc_offsets1[nprocs] = k;
   itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
   for (i = 0; i <= nprocs; i++)
      proc_offsets2[i] = itmp_array[i] - proc_offsets1[i];
   /* -----------------------------------------------------
    * assign id's to row and col for later processing:
    * exp_indices[i] >= 0  -> row i is selected, new index exp_indices[i]
    * exp_indices[i] == -k-1 -> row i is in the complement, new index k
    * ----------------------------------------------------- */
   exp_indices = (HYPRE_Int *) malloc(nrows_A * sizeof(HYPRE_Int));
   for (i = 0; i < nrows_A; i++) exp_indices[i] = -1;
   for (i = 0; i < nindices; i++)
   {
      if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i;
      else
      {
         /* duplicate index in the input list
          * NOTE(review): the "%d %d" specifiers have no matching arguments;
          * presumably leftovers from a printf-style call -- the message is
          * recorded verbatim by hypre_error_w_msg */
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n");
         exit(1);
      }
   }
   k = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         exp_indices[i] = - k - 1;
         k++;
      }
   }
   /* -----------------------------------------------------
    * compute number of nonzeros for each block
    * ----------------------------------------------------- */
   nnz11 = nnz12 = nnz21 = nnz22 = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) nnz11++;
            else nnz12++;
         }
      }
      else
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) nnz21++;
            else nnz22++;
         }
      }
   }
   /* -----------------------------------------------------
    * create A11 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz11;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* This case is not yet implemented! */
   global_nrows = 0;
   global_ncols = 0;
   row_starts = NULL;
   col_starts = NULL;
#else
   global_nrows = proc_offsets1[nprocs];
   global_ncols = proc_offsets1[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = proc_offsets1[i];
      col_starts[i] = proc_offsets1[i];
   }
#endif
   A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   /* copy entries with selected row AND selected column into A11,
    * renumbering both through exp_indices */
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A11_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;
   /* serial run: the offd part is empty (row pointers all zero) */
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   for (i = 0; i <= nrows; i++) offd_i[i] = 0;
   offd = hypre_ParCSRMatrixOffd(A11_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;
   /* -----------------------------------------------------
    * create A12 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz12;
   global_nrows = proc_offsets1[nprocs];
   global_ncols = proc_offsets2[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = proc_offsets1[i];
      col_starts[i] = proc_offsets2[i];
   }
   A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   /* selected row, complement column: column index decoded as
    * -exp_indices[col]-1 (the complement numbering) */
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] < 0)
            {
               diag_j[nnz] = - exp_indices[col] - 1;
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   /* sanity check against the count computed above */
   if (nnz > nnz_diag) hypre_error(HYPRE_ERROR_GENERIC);
   /*hypre_printf("WARNING WARNING WARNING\n");*/
   diag = hypre_ParCSRMatrixDiag(A12_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   for (i = 0; i <= nrows; i++) offd_i[i] = 0;
   offd = hypre_ParCSRMatrixOffd(A12_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;
   /* -----------------------------------------------------
    * create A21 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz21;
   global_nrows = proc_offsets2[nprocs];
   global_ncols = proc_offsets1[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = proc_offsets2[i];
      col_starts[i] = proc_offsets1[i];
   }
   A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   /* complement row, selected column */
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A21_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   for (i = 0; i <= nrows; i++) offd_i[i] = 0;
   offd = hypre_ParCSRMatrixOffd(A21_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;
   /* -----------------------------------------------------
    * create A22 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz22;
   global_nrows = proc_offsets2[nprocs];
   global_ncols = proc_offsets2[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = proc_offsets2[i];
      col_starts[i] = proc_offsets2[i];
   }
   A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   /* complement row, complement column */
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] < 0)
            {
               diag_j[nnz] = - exp_indices[col] - 1;
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A22_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   for (i = 0; i <= nrows; i++) offd_i[i] = 0;
   offd = hypre_ParCSRMatrixOffd(A22_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;
   /* -----------------------------------------------------
    * hand the matrices back to the caller and clean up
    * ----------------------------------------------------- */
   (*submatrices)[0] = A11_csr;
   (*submatrices)[1] = A12_csr;
   (*submatrices)[2] = A21_csr;
   (*submatrices)[3] = A22_csr;
   free(proc_offsets1);
   free(proc_offsets2);
   free(exp_indices);
}
/* -----------------------------------------------------------------------------
 * extract row submatrices of a rectangular matrix:
 *   A11 = the rows listed in indices2 (all columns)
 *   A21 = the remaining rows          (all columns)
 * indices2[0] holds the number of selected local rows and indices2[1..] the
 * row indices themselves (sorted in place here).  The results are written to
 * (*submatrices)[0] and (*submatrices)[1]; the caller owns both matrices.
 * NOTE(review): like ExtractSubmatrices above, this routine mixes global and
 * local numbering (nrows_A is the *global* row count) and so effectively
 * assumes a sequential setting -- confirm before using it on a genuinely
 * distributed matrix.
 * ----------------------------------------------------------------------------- */
void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr,
                                              HYPRE_Int *indices2,
                                              hypre_ParCSRMatrix ***submatrices )
{
   HYPRE_Int nindices, *indices, nrows_A, *A_diag_i, *A_diag_j, mypid, nprocs;
   HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *itmp_array, *exp_indices;
   HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag;
   HYPRE_Int *A_offd_i, *A_offd_j;
   HYPRE_Int global_nrows, global_ncols, *row_starts, *col_starts, nrows, nnz;
   HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd;
   HYPRE_Complex *A_diag_a, *A_offd_a, *diag_a, *offd_a;
   hypre_ParCSRMatrix *A11_csr, *A21_csr;
   hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd;
   MPI_Comm comm;

   /* -----------------------------------------------------
    * first make sure the incoming indices are in order
    * ----------------------------------------------------- */
   nindices = indices2[0];
   indices  = &(indices2[1]);
   hypre_qsort0(indices, 0, nindices-1);

   /* -----------------------------------------------------
    * fetch matrix information
    * ----------------------------------------------------- */
   nrows_A  = hypre_ParCSRMatrixGlobalNumRows(A_csr);
   A_diag   = hypre_ParCSRMatrixDiag(A_csr);
   A_diag_i = hypre_CSRMatrixI(A_diag);
   A_diag_j = hypre_CSRMatrixJ(A_diag);
   A_diag_a = hypre_CSRMatrixData(A_diag);
   A_offd   = hypre_ParCSRMatrixOffd(A_csr);
   A_offd_i = hypre_CSRMatrixI(A_offd);
   A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* fixed: the offd value array is needed for the offd copies below */
   A_offd_a = hypre_CSRMatrixData(A_offd);
   comm = hypre_ParCSRMatrixComm(A_csr);
   hypre_MPI_Comm_rank(comm, &mypid);
   hypre_MPI_Comm_size(comm, &nprocs);

   /* -----------------------------------------------------
    * compute new matrix dimensions
    * ----------------------------------------------------- */
   proc_offsets1 = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int));
   proc_offsets2 = (HYPRE_Int *) malloc((nprocs+1) * sizeof(HYPRE_Int));
   hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
                       HYPRE_MPI_INT, comm);
   /* turn per-process counts into exclusive prefix offsets */
   k = 0;
   for (i = 0; i < nprocs; i++)
   {
      j = proc_offsets1[i];
      proc_offsets1[i] = k;
      k += j;
   }
   proc_offsets1[nprocs] = k;
   itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
   /* offsets of the complementary (non-selected) rows */
   for (i = 0; i <= nprocs; i++)
      proc_offsets2[i] = itmp_array[i] - proc_offsets1[i];

   /* -----------------------------------------------------
    * assign id's to row and col for later processing:
    *   selected row i   -> exp_indices[i] = position in indices (>= 0)
    *   unselected row i -> exp_indices[i] = -(rank among unselected) - 1
    * ----------------------------------------------------- */
   exp_indices = (HYPRE_Int *) malloc(nrows_A * sizeof(HYPRE_Int));
   for (i = 0; i < nrows_A; i++) exp_indices[i] = -1;
   for (i = 0; i < nindices; i++)
   {
      if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i;
      else
      {
         /* duplicate entry in the index list */
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n");
         exit(1);
      }
   }
   k = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         exp_indices[i] = - k - 1;
         k++;
      }
   }

   /* -----------------------------------------------------
    * compute number of nonzeros for each block
    * ----------------------------------------------------- */
   nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) nnz11++;
         }
         nnz11_offd += A_offd_i[i+1] - A_offd_i[i];
      }
      else
      {
         /* A21 keeps every diag entry of an unselected row (the fill loop
            below copies whole rows), so count them all here.  Filtering on
            exp_indices[col] < 0 -- as an earlier revision did -- produced a
            count smaller than the fill and overran diag_j/diag_a. */
         nnz21 += A_diag_i[i+1] - A_diag_i[i];
         nnz21_offd += A_offd_i[i+1] - A_offd_i[i];
      }
   }

   /* -----------------------------------------------------
    * create A11 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr));
   nnz_diag = nnz11;
   nnz_offd = nnz11_offd;
   global_nrows = proc_offsets1[nprocs];
   itmp_array   = hypre_ParCSRMatrixColStarts(A_csr);
   global_ncols = itmp_array[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = proc_offsets1[i];
      col_starts[i] = itmp_array[i];
   }
   A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows  = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               /* remap to the compressed A11 column numbering */
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A11_csr);
   hypre_CSRMatrixI(diag)    = diag_i;
   hypre_CSRMatrixJ(diag)    = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd);
   offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd);
   nnz = 0;
   row = 0;
   offd_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            offd_j[nnz] = A_offd_j[j];
            /* fixed: values come from the offd array (was A_diag_a, which
               indexed the wrong array and could read out of bounds) */
            offd_a[nnz++] = A_offd_a[j];
         }
         row++;
         offd_i[row] = nnz;
      }
   }
   offd = hypre_ParCSRMatrixOffd(A11_csr);
   hypre_CSRMatrixI(offd)    = offd_i;
   hypre_CSRMatrixJ(offd)    = offd_j;
   hypre_CSRMatrixData(offd) = offd_a;

   /* -----------------------------------------------------
    * create A21 matrix
    * ----------------------------------------------------- */
   ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr));
   nnz_offd = nnz21_offd;
   nnz_diag = nnz21;
   global_nrows = proc_offsets2[nprocs];
   itmp_array   = hypre_ParCSRMatrixColStarts(A_csr);
   global_ncols = itmp_array[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   col_starts = hypre_CTAlloc(HYPRE_Int, nprocs+1);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = proc_offsets2[i];
      col_starts[i] = itmp_array[i];
   }
   A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows  = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         /* copy the whole row; columns keep their original numbering since
            A21 spans the full column space of A */
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            diag_j[nnz] = A_diag_j[j];
            diag_a[nnz++] = A_diag_a[j];
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A21_csr);
   hypre_CSRMatrixI(diag)    = diag_i;
   hypre_CSRMatrixJ(diag)    = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1);
   offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd);
   offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd);
   nnz = 0;
   row = 0;
   offd_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            offd_j[nnz] = A_offd_j[j];
            /* fixed: offd values come from A_offd_a (was A_diag_a) */
            offd_a[nnz++] = A_offd_a[j];
         }
         row++;
         offd_i[row] = nnz;
      }
   }
   offd = hypre_ParCSRMatrixOffd(A21_csr);
   hypre_CSRMatrixI(offd)    = offd_i;
   hypre_CSRMatrixJ(offd)    = offd_j;
   hypre_CSRMatrixData(offd) = offd_a;

   /* -----------------------------------------------------
    * hand the matrices back to the caller and clean up
    * ----------------------------------------------------- */
   (*submatrices)[0] = A11_csr;
   (*submatrices)[1] = A21_csr;
   free(proc_offsets1);
   free(proc_offsets2);
   free(exp_indices);
}
/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the matrix
 * (local entries live in the diag and offd CSR pieces; add both sums)
 * ----------------------------------------------------------------------------- */
HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A )
{
   return hypre_CSRMatrixSumElts(hypre_ParCSRMatrixDiag(A)) +
          hypre_CSRMatrixSumElts(hypre_ParCSRMatrixOffd(A));
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatAminvDB
 * computes C = (A - inv(D)B) where D is a diagonal matrix
 * Note: Data structure of A is expected to be a subset of data structure of B!
 *
 * Arguments:
 *   A, B   - ParCSR operands; same row partitioning, and every stored
 *            position of A must also be stored in B (see Note above).
 *   d      - local diagonal entries of D, one per local row of B.
 *            Entries must be nonzero: 1.0/d[i] is formed below.
 *   C_ptr  - output; receives a newly allocated matrix owned by the caller.
 * Returns hypre_error_flag.
 *
 * Strategy: C is created as a structural clone of B, so C's sparsity
 * pattern is B's pattern; values of A and of -inv(D)B are then accumulated
 * into that pattern in place.  A copy of B's communication package is
 * attached to C so that C does not alias B's comm pkg.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B,
HYPRE_Complex *d, hypre_ParCSRMatrix **C_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(B);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_ParCSRMatrix *C = NULL;
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
HYPRE_Int num_sends_B, num_recvs_B;
HYPRE_Int i, j, cnt;
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
HYPRE_Int *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
hypre_CSRMatrix *C_diag = NULL;
hypre_CSRMatrix *C_offd = NULL;
HYPRE_Int *C_diag_i = NULL;
HYPRE_Int *C_diag_j = NULL;
HYPRE_Complex *C_diag_data = NULL;
HYPRE_Int *C_offd_i = NULL;
HYPRE_Int *C_offd_j = NULL;
HYPRE_Complex *C_offd_data = NULL;
HYPRE_Int num_procs, my_id;
HYPRE_Int *recv_procs_B;
HYPRE_Int *send_procs_B;
HYPRE_Int *recv_vec_starts_B;
HYPRE_Int *send_map_starts_B;
HYPRE_Int *send_map_elmts_B;
hypre_ParCSRCommPkg *comm_pkg_C;
HYPRE_Int *recv_procs_C;
HYPRE_Int *send_procs_C;
HYPRE_Int *recv_vec_starts_C;
HYPRE_Int *send_map_starts_C;
HYPRE_Int *send_map_elmts_C;
HYPRE_Int *map_to_B;
/*HYPRE_Int *C_diag_array;
HYPRE_Int *C_offd_array;*/
HYPRE_Complex *D_tmp;
HYPRE_Int size, rest, num_threads, ii;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads);
C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads);*/
/*---------------------------------------------------------------------
* If there exists no CommPkg for B, a CommPkg is generated
*--------------------------------------------------------------------*/
if (!comm_pkg_B)
{
hypre_MatvecCommPkgCreate(B);
comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
}
/* C starts as a full structural clone of B: same i/j pattern, fresh value
   arrays; the pattern arrays below therefore equal B's pattern. */
C = hypre_ParCSRMatrixCompleteClone(B);
/*hypre_ParCSRMatrixInitialize(C);*/
C_diag = hypre_ParCSRMatrixDiag(C);
C_diag_i = hypre_CSRMatrixI(C_diag);
C_diag_j = hypre_CSRMatrixJ(C_diag);
C_diag_data = hypre_CSRMatrixData(C_diag);
C_offd = hypre_ParCSRMatrixOffd(C);
C_offd_i = hypre_CSRMatrixI(C_offd);
C_offd_j = hypre_CSRMatrixJ(C_offd);
C_offd_data = hypre_CSRMatrixData(C_offd);
/* rows are split into num_threads contiguous ranges; the first `rest`
   threads get one extra row */
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows);
/* Map each offd column of A to the matching offd column of B.  This scan
   assumes col_map_offd_A is a (sorted) subsequence of col_map_offd_B --
   a consequence of "A is a subset of B"; confirm for new callers. */
if (num_cols_offd_A)
{
map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A);
cnt = 0;
for (i=0; i < num_cols_offd_A; i++)
{
while (col_map_offd_B[cnt] < col_map_offd_A[i])
{
cnt++;
}
map_to_B[i] = cnt;
cnt++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j)
#endif
for (ii=0; ii < num_threads; ii++)
{
HYPRE_Int *A_marker = NULL;
HYPRE_Int ns, ne, A_col, num_cols, nmax;
/* [ns, ne) = this thread's row range */
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/* one marker array reused for both the diag pass and the offd pass */
nmax = hypre_max(num_rows, num_cols_offd_B);
A_marker = hypre_CTAlloc(HYPRE_Int, nmax);
for (i=0; i < num_rows; i++)
A_marker[i] = -1;
for (i=ns; i < ne; i++)
D_tmp[i] = 1.0/d[i];
/* diag part: scatter A's entries, then subtract inv(D)*B entries.
   A_marker[col] records where column `col` of the current row landed in
   C_diag so duplicates are accumulated instead of re-inserted. */
num_cols = C_diag_i[ns];
for (i=ns; i < ne; i++)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
A_col = A_diag_j[j];
if (A_marker[A_col] < C_diag_i[i])
{
A_marker[A_col] = num_cols;
C_diag_j[num_cols] = A_col;
C_diag_data[num_cols] = A_diag_data[j];
num_cols++;
}
else
{
C_diag_data[A_marker[A_col]] += A_diag_data[j];
}
}
for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
{
A_col = B_diag_j[j];
if (A_marker[A_col] < C_diag_i[i])
{
A_marker[A_col] = num_cols;
C_diag_j[num_cols] = A_col;
C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j];
num_cols++;
}
else
{
C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j];
}
}
}
/* offd part: same marker technique, with A's offd columns translated to
   B/C offd numbering through map_to_B */
for (i=0; i < num_cols_offd_B; i++)
A_marker[i] = -1;
num_cols = C_offd_i[ns];
for (i=ns; i < ne; i++)
{
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
A_col = map_to_B[A_offd_j[j]];
if (A_marker[A_col] < B_offd_i[i])
{
A_marker[A_col] = num_cols;
C_offd_j[num_cols] = A_col;
C_offd_data[num_cols] = A_offd_data[j];
num_cols++;
}
else
{
C_offd_data[A_marker[A_col]] += A_offd_data[j];
}
}
for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
{
A_col = B_offd_j[j];
if (A_marker[A_col] < B_offd_i[i])
{
A_marker[A_col] = num_cols;
C_offd_j[num_cols] = A_col;
C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j];
num_cols++;
}
else
{
C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j];
}
}
}
hypre_TFree(A_marker);
} /* end parallel region */
/*for (i=0; i < num_cols_offd_B; i++)
col_map_offd_C[i] = col_map_offd_B[i]; */
/* Build C's comm pkg as a deep copy of B's, so destroying C cannot free
   arrays still owned by B. */
num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B);
num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B);
recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B);
recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B);
send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B);
send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B);
send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B);
recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B);
recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1);
send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B);
send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1);
send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B]);
for (i=0; i < num_recvs_B; i++)
recv_procs_C[i] = recv_procs_B[i];
for (i=0; i < num_recvs_B+1; i++)
recv_vec_starts_C[i] = recv_vec_starts_B[i];
for (i=0; i < num_sends_B; i++)
send_procs_C[i] = send_procs_B[i];
for (i=0; i < num_sends_B+1; i++)
send_map_starts_C[i] = send_map_starts_B[i];
for (i=0; i < send_map_starts_B[num_sends_B]; i++)
send_map_elmts_C[i] = send_map_elmts_B[i];
comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
hypre_ParCSRCommPkgComm(comm_pkg_C) = comm;
hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B;
hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C;
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C;
hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B;
hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C;
hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C;
hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C;
hypre_TFree(D_tmp);
/* map_to_B was only allocated when A has offd columns */
if (num_cols_offd_A) hypre_TFree(map_to_B);
*C_ptr = C;
return (hypre_error_flag);
}
/*--------------------------------------------------------------------------
 * hypre_ParTMatmul : multiplies two ParCSRMatrices transpose(A) and B and returns
 * the product in ParCSRMatrix C
 * Note that C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
 *
 * Outline:
 *   1. transpose A's diag and offd pieces locally;
 *   2. form the purely local product AT_diag*B, plus the cross terms
 *      AT_offd*B, which belong to other processes and are shipped to their
 *      owners via hypre_ExchangeRAPData (C_ext);
 *   3. split the received C_ext rows into diag/offd parts and merge them
 *      with the local product in a two-pass (symbolic, then numeric)
 *      OpenMP assembly;
 *   4. compress C's off-diagonal column map to the columns actually used.
 * Returns NULL (with hypre_error_w_msg) on incompatible dimensions.
 *--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *hypre_ParTMatmul( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *AT_diag = NULL;
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *AT_offd = NULL;
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
HYPRE_Int first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
HYPRE_Int *col_starts_A = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int *col_starts_B = hypre_ParCSRMatrixColStarts(B);
HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
hypre_ParCSRMatrix *C;
HYPRE_Int *col_map_offd_C = NULL;
HYPRE_Int *map_B_to_C;
hypre_CSRMatrix *C_diag = NULL;
hypre_CSRMatrix *C_tmp_diag = NULL;
HYPRE_Complex *C_diag_data = NULL;
HYPRE_Int *C_diag_i = NULL;
HYPRE_Int *C_diag_j = NULL;
HYPRE_Int first_col_diag_C;
HYPRE_Int last_col_diag_C;
hypre_CSRMatrix *C_offd = NULL;
hypre_CSRMatrix *C_tmp_offd = NULL;
hypre_CSRMatrix *C_int = NULL;
hypre_CSRMatrix *C_ext = NULL;
HYPRE_Int *C_ext_i;
HYPRE_Int *C_ext_j;
HYPRE_Complex *C_ext_data;
HYPRE_Int *C_ext_diag_i;
HYPRE_Int *C_ext_diag_j;
HYPRE_Complex *C_ext_diag_data;
HYPRE_Int *C_ext_offd_i;
HYPRE_Int *C_ext_offd_j;
HYPRE_Complex *C_ext_offd_data;
HYPRE_Int C_ext_size = 0;
HYPRE_Int C_ext_diag_size = 0;
HYPRE_Int C_ext_offd_size = 0;
HYPRE_Int *C_tmp_diag_i;
HYPRE_Int *C_tmp_diag_j;
HYPRE_Complex *C_tmp_diag_data;
HYPRE_Int *C_tmp_offd_i;
HYPRE_Int *C_tmp_offd_j;
HYPRE_Complex *C_tmp_offd_data;
HYPRE_Complex *C_offd_data=NULL;
HYPRE_Int *C_offd_i=NULL;
HYPRE_Int *C_offd_j=NULL;
HYPRE_Int *temp;
HYPRE_Int *send_map_starts_A;
HYPRE_Int *send_map_elmts_A;
HYPRE_Int num_sends_A;
HYPRE_Int num_cols_offd_C = 0;
HYPRE_Int *P_marker;
HYPRE_Int i, j;
HYPRE_Int i1, j_indx;
HYPRE_Int n_rows_A, n_cols_A;
HYPRE_Int n_rows_B, n_cols_B;
/*HYPRE_Int allsquare = 0;*/
HYPRE_Int cnt, cnt_offd, cnt_diag;
HYPRE_Int value;
HYPRE_Int num_procs, my_id;
HYPRE_Int max_num_threads;
HYPRE_Int *C_diag_array = NULL;
HYPRE_Int *C_offd_array = NULL;
HYPRE_Int first_row_index, first_col_diag;
HYPRE_Int local_num_rows, local_num_cols;
n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B);
n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
/* A^T*B requires A and B to have the same (global and local) row counts */
if (n_rows_A != n_rows_B || num_rows_diag_A != num_rows_diag_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
return NULL;
}
/*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/
/* local transposes of A's pieces (data included: last arg = 1) */
hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1);
hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1);
C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag);
C_ext_size = 0;
if (num_procs > 1)
{
hypre_CSRMatrix *C_int_diag;
hypre_CSRMatrix *C_int_offd;
C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd);
/* AT_offd rows correspond to other processes' rows of C; compute them,
   merge into one CSR, and exchange so each owner receives its part */
C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag);
C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd);
/* B's diag/offd are temporarily swapped so MergeDiagAndOffd can reuse
   B's metadata (col map, first col); restored right after */
hypre_ParCSRMatrixDiag(B) = C_int_diag;
hypre_ParCSRMatrixOffd(B) = C_int_offd;
C_int = hypre_MergeDiagAndOffd(B);
hypre_ParCSRMatrixDiag(B) = B_diag;
hypre_ParCSRMatrixOffd(B) = B_offd;
C_ext = hypre_ExchangeRAPData(C_int, comm_pkg_A);
C_ext_i = hypre_CSRMatrixI(C_ext);
C_ext_j = hypre_CSRMatrixJ(C_ext);
C_ext_data = hypre_CSRMatrixData(C_ext);
C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)];
hypre_CSRMatrixDestroy(C_int);
hypre_CSRMatrixDestroy(C_int_diag);
hypre_CSRMatrixDestroy(C_int_offd);
}
else
{
/* single process: no off-diagonal part; keep an empty placeholder */
C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0);
hypre_CSRMatrixInitialize(C_tmp_offd);
}
hypre_CSRMatrixDestroy(AT_diag);
hypre_CSRMatrixDestroy(AT_offd);
/*-----------------------------------------------------------------------
* Add contents of C_ext to C_tmp_diag and C_tmp_offd
* to obtain C_diag and C_offd
*-----------------------------------------------------------------------*/
/* check for new nonzero columns in C_offd generated through C_ext */
first_col_diag_C = first_col_diag_B;
last_col_diag_C = first_col_diag_B + num_cols_diag_B - 1;
C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag);
if (C_ext_size || num_cols_offd_B)
{
HYPRE_Int C_ext_num_rows;
num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A);
C_ext_num_rows = send_map_starts_A[num_sends_A];
/* split each received row into in-range (diag) and out-of-range (offd)
   column entries; `temp` collects candidate offd global columns */
C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1);
C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1);
temp = hypre_CTAlloc(HYPRE_Int, C_ext_size+num_cols_offd_B);
C_ext_diag_size = 0;
C_ext_offd_size = 0;
for (i=0; i < C_ext_num_rows; i++)
{
for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++)
if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C)
temp[C_ext_offd_size++] = C_ext_j[j];
else
C_ext_diag_size++;
C_ext_diag_i[i+1] = C_ext_diag_size;
C_ext_offd_i[i+1] = C_ext_offd_size;
}
cnt = C_ext_offd_size;
for (i=0; i < num_cols_offd_B; i++)
temp[cnt++] = col_map_offd_B[i];
/* sort + unique -> global column map of C_offd */
if (cnt)
{
hypre_qsort0(temp,0,cnt-1);
value = temp[0];
num_cols_offd_C = 1;
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_C++] = value;
}
}
}
if (num_cols_offd_C)
col_map_offd_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C);
for (i=0; i < num_cols_offd_C; i++)
col_map_offd_C[i] = temp[i];
hypre_TFree(temp);
if (C_ext_diag_size)
{
C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size);
C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size);
}
if (C_ext_offd_size)
{
C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size);
C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size);
}
C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag);
C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag);
C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd);
C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd);
C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd);
/* second sweep over C_ext: store diag entries with local column indices,
   offd entries as positions in col_map_offd_C */
cnt_offd = 0;
cnt_diag = 0;
for (i=0; i < C_ext_num_rows; i++)
{
for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++)
if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C)
{
C_ext_offd_j[cnt_offd] = hypre_BinarySearch(col_map_offd_C,
C_ext_j[j],
num_cols_offd_C);
C_ext_offd_data[cnt_offd++] = C_ext_data[j];
}
else
{
C_ext_diag_j[cnt_diag] = C_ext_j[j] - first_col_diag_C;
C_ext_diag_data[cnt_diag++] = C_ext_data[j];
}
}
}
if (C_ext)
{
hypre_CSRMatrixDestroy(C_ext);
C_ext = NULL;
}
/* renumber C_tmp_offd's columns (B offd numbering) into C offd numbering */
if (num_cols_offd_B)
{
map_B_to_C = hypre_CTAlloc(HYPRE_Int,num_cols_offd_B);
cnt = 0;
for (i=0; i < num_cols_offd_C; i++)
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
if (cnt == num_cols_offd_B) break;
}
for (i=0;
i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++)
{
j_indx = C_tmp_offd_j[i];
C_tmp_offd_j[i] = map_B_to_C[j_indx];
}
}
/*-----------------------------------------------------------------------
* Need to compute C_diag = C_tmp_diag + C_ext_diag
* and C_offd = C_tmp_offd + C_ext_offd !!!!
* First generate structure
*-----------------------------------------------------------------------*/
if (C_ext_size || num_cols_offd_B)
{
C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1);
C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1);
/* per-thread nonzero counts, turned into prefix sums by thread 0 below */
C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads);
C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int *B_marker_offd = NULL;
HYPRE_Int ik, jk, j1, j2, jcol;
HYPRE_Int ns, ne, ii, nnz_d, nnz_o;
HYPRE_Int rest, size;
HYPRE_Int num_threads = hypre_NumActiveThreads();
/* [ns, ne) = this thread's range of C rows (= columns of A's diag) */
size = num_cols_diag_A/num_threads;
rest = num_cols_diag_A - size*num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B);
B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C);
for (ik = 0; ik < num_cols_diag_B; ik++)
B_marker[ik] = -1;
for (ik = 0; ik < num_cols_offd_C; ik++)
B_marker_offd[ik] = -1;
nnz_d = 0;
nnz_o = 0;
/* symbolic pass: count distinct columns per row; markers record the row
   in which a column was last seen so duplicates are not re-counted */
for (ik = ns; ik < ne; ik++)
{
for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
{
jcol = C_tmp_diag_j[jk];
B_marker[jcol] = ik;
nnz_d++;
}
for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
{
jcol = C_tmp_offd_j[jk];
B_marker_offd[jcol] = ik;
nnz_o++;
}
/* find the C_ext row (if any) that was sent for this local row */
for (jk = 0; jk < num_sends_A; jk++)
for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
if (send_map_elmts_A[j1] == ik)
{
for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
{
jcol = C_ext_diag_j[j2];
if (B_marker[jcol] < ik)
{
B_marker[jcol] = ik;
nnz_d++;
}
}
for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
{
jcol = C_ext_offd_j[j2];
if (B_marker_offd[jcol] < ik)
{
B_marker_offd[jcol] = ik;
nnz_o++;
}
}
break;
}
C_diag_array[ii] = nnz_d;
C_offd_array[ii] = nnz_o;
}
/* wait for all counts before thread 0 builds the prefix sums */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii == 0)
{
nnz_d = 0;
nnz_o = 0;
for (ik = 0; ik < num_threads-1; ik++)
{
C_diag_array[ik+1] += C_diag_array[ik];
C_offd_array[ik+1] += C_offd_array[ik];
}
nnz_d = C_diag_array[num_threads-1];
nnz_o = C_offd_array[num_threads-1];
C_diag_i[num_cols_diag_A] = nnz_d;
C_offd_i[num_cols_diag_A] = nnz_o;
C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d);
C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o);
hypre_CSRMatrixI(C_diag) = C_diag_i;
hypre_CSRMatrixInitialize(C_diag);
C_diag_j = hypre_CSRMatrixJ(C_diag);
C_diag_data = hypre_CSRMatrixData(C_diag);
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_CSRMatrixInitialize(C_offd);
C_offd_j = hypre_CSRMatrixJ(C_offd);
C_offd_data = hypre_CSRMatrixData(C_offd);
}
/* all threads must see the freshly allocated j/data arrays */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/*-----------------------------------------------------------------------
* Need to compute C_diag = C_tmp_diag + C_ext_diag
* and C_offd = C_tmp_offd + C_ext_offd !!!!
* Now fill in values
*-----------------------------------------------------------------------*/
for (ik = 0; ik < num_cols_diag_B; ik++)
B_marker[ik] = -1;
for (ik = 0; ik < num_cols_offd_C; ik++)
B_marker_offd[ik] = -1;
/*-----------------------------------------------------------------------
* Populate matrices
*-----------------------------------------------------------------------*/
/* each thread starts writing at the prefix-sum offset of its predecessor */
nnz_d = 0;
nnz_o = 0;
/* (the repeated reset below is redundant but harmless) */
nnz_o = 0;
if (ii)
{
nnz_d = C_diag_array[ii-1];
nnz_o = C_offd_array[ii-1];
}
/* numeric pass: markers now hold the destination slot of each column so
   entries arriving from C_ext are accumulated into existing slots */
for (ik = ns; ik < ne; ik++)
{
C_diag_i[ik] = nnz_d;
C_offd_i[ik] = nnz_o;
for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
{
jcol = C_tmp_diag_j[jk];
C_diag_j[nnz_d] = jcol;
C_diag_data[nnz_d] = C_tmp_diag_data[jk];
B_marker[jcol] = nnz_d;
nnz_d++;
}
for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
{
jcol = C_tmp_offd_j[jk];
C_offd_j[nnz_o] = jcol;
C_offd_data[nnz_o] = C_tmp_offd_data[jk];
B_marker_offd[jcol] = nnz_o;
nnz_o++;
}
for (jk = 0; jk < num_sends_A; jk++)
for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
if (send_map_elmts_A[j1] == ik)
{
for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
{
jcol = C_ext_diag_j[j2];
if (B_marker[jcol] < C_diag_i[ik])
{
C_diag_j[nnz_d] = jcol;
C_diag_data[nnz_d] = C_ext_diag_data[j2];
B_marker[jcol] = nnz_d;
nnz_d++;
}
else
C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2];
}
for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
{
jcol = C_ext_offd_j[j2];
if (B_marker_offd[jcol] < C_offd_i[ik])
{
C_offd_j[nnz_o] = jcol;
C_offd_data[nnz_o] = C_ext_offd_data[j2];
B_marker_offd[jcol] = nnz_o;
nnz_o++;
}
else
C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2];
}
break;
}
}
hypre_TFree(B_marker);
hypre_TFree(B_marker_offd);
} /*end parallel region */
hypre_TFree(C_diag_array);
hypre_TFree(C_offd_array);
}
/*C = hypre_ParCSRMatrixCreate(comm, n_cols_A, n_cols_B, col_starts_A,
col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd);
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */
/* C's row space is A's column space, C's column space is B's */
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* row_starts[0] is start of local rows. row_starts[1] is start of next
processor's rows */
first_row_index = col_starts_A[0];
local_num_rows = col_starts_A[1]-first_row_index ;
first_col_diag = col_starts_B[0];
local_num_cols = col_starts_B[1]-first_col_diag;
#else
first_row_index = col_starts_A[my_id];
local_num_rows = col_starts_A[my_id+1]-first_row_index;
first_col_diag = col_starts_B[my_id];
local_num_cols = col_starts_B[my_id+1]-first_col_diag;
#endif
/* assemble C by hand (rather than via Create) so the already-built
   diag/offd pieces can be plugged in directly */
C = hypre_CTAlloc(hypre_ParCSRMatrix, 1);
hypre_ParCSRMatrixComm(C) = comm;
hypre_ParCSRMatrixGlobalNumRows(C) = n_cols_A;
hypre_ParCSRMatrixGlobalNumCols(C) = n_cols_B;
hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index;
hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag;
hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + local_num_rows - 1;
hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + local_num_cols - 1;
hypre_ParCSRMatrixColMapOffd(C) = NULL;
hypre_ParCSRMatrixAssumedPartition(C) = NULL;
hypre_ParCSRMatrixRowStarts(C) = col_starts_A;
hypre_ParCSRMatrixColStarts(C) = col_starts_B;
hypre_ParCSRMatrixCommPkg(C) = NULL;
hypre_ParCSRMatrixCommPkgT(C) = NULL;
/* set defaults */
hypre_ParCSRMatrixOwnsData(C) = 1;
hypre_ParCSRMatrixRowindices(C) = NULL;
hypre_ParCSRMatrixRowvalues(C) = NULL;
hypre_ParCSRMatrixGetrowactive(C) = 0;
/* Note that C does not own the partitionings */
hypre_ParCSRMatrixSetRowStartsOwner(C,0);
hypre_ParCSRMatrixSetColStartsOwner(C,0);
/* single-process path never built C_diag/C_offd; use the local products */
if (C_diag) hypre_ParCSRMatrixDiag(C) = C_diag;
else hypre_ParCSRMatrixDiag(C) = C_tmp_diag;
if (C_offd) hypre_ParCSRMatrixOffd(C) = C_offd;
else hypre_ParCSRMatrixOffd(C) = C_tmp_offd;
/* compress col_map_offd_C to the columns actually referenced; P_marker
   starts at -1 (nonzero = unused), used columns are flagged with 0 */
if (num_cols_offd_C)
{
HYPRE_Int jj_count_offd, nnz_offd;
HYPRE_Int *new_col_map_offd_C = NULL;
P_marker = hypre_CTAlloc(HYPRE_Int,num_cols_offd_C);
for (i=0; i < num_cols_offd_C; i++)
P_marker[i] = -1;
jj_count_offd = 0;
nnz_offd = C_offd_i[num_cols_diag_A];
for (i=0; i < nnz_offd; i++)
{
i1 = C_offd_j[i];
if (P_marker[i1])
{
P_marker[i1] = 0;
jj_count_offd++;
}
}
if (jj_count_offd < num_cols_offd_C)
{
new_col_map_offd_C = hypre_CTAlloc(HYPRE_Int,jj_count_offd);
jj_count_offd = 0;
for (i=0; i < num_cols_offd_C; i++)
if (!P_marker[i])
{
P_marker[i] = jj_count_offd;
new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i];
}
for (i=0; i < nnz_offd; i++)
{
i1 = C_offd_j[i];
C_offd_j[i] = P_marker[i1];
}
num_cols_offd_C = jj_count_offd;
hypre_TFree(col_map_offd_C);
col_map_offd_C = new_col_map_offd_C;
hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C;
}
hypre_TFree(P_marker);
}
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
/*-----------------------------------------------------------------------
* Free various arrays
*-----------------------------------------------------------------------*/
if (C_ext_size || num_cols_offd_B)
{
hypre_TFree(C_ext_diag_i);
hypre_TFree(C_ext_offd_i);
}
if (C_ext_diag_size)
{
hypre_TFree(C_ext_diag_j);
hypre_TFree(C_ext_diag_data);
}
if (C_ext_offd_size)
{
hypre_TFree(C_ext_offd_j);
hypre_TFree(C_ext_offd_data);
}
if (num_cols_offd_B) hypre_TFree(map_B_to_C);
/* the temporaries are only kept when they were handed to C above */
if (C_diag) hypre_CSRMatrixDestroy(C_tmp_diag);
if (C_offd) hypre_CSRMatrixDestroy(C_tmp_offd);
return C;
}
|
update_ops_matrix_dense_multi.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
//void multi_qubit_dense_matrix_gate_old_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim);
//void multi_qubit_dense_matrix_gate_old_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim);
void create_shift_mask_list_from_list_buf(const UINT* array, UINT count, UINT* dst_array, ITYPE* dst_mask);
/* Apply an arbitrary dense matrix gate on the listed target qubits of a
 * state vector of dimension `dim`.  One- and two-qubit gates are forwarded
 * to their specialized kernels; larger gates use the generic implementation,
 * selecting the OpenMP-parallel variant once the state is large enough for
 * threading to pay off. */
void multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
	switch (target_qubit_index_count) {
	case 1:
		single_qubit_dense_matrix_gate(target_qubit_index_list[0], matrix, state, dim);
		break;
	case 2:
		double_qubit_dense_matrix_gate_c(target_qubit_index_list[0], target_qubit_index_list[1], matrix, state, dim);
		break;
	default:
#ifdef _OPENMP
	{
		/* below 2^threshold amplitudes the serial kernel is cheaper than
		   spinning up a parallel region */
		const UINT threshold = 8;
		if (dim < (((ITYPE)1) << threshold)) {
			multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
		}
		else {
			multi_qubit_dense_matrix_gate_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
		}
	}
#else
		multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
#endif
		break;
	}
}
/*
 * Sort the target-qubit index list into dst_array (ascending) and, for
 * each sorted index q, store the mask (2^q - 1) in dst_mask: the mask
 * selecting all bit positions below q.  These masks are used by the
 * dense-matrix kernels to re-insert target-qubit bits when enumerating
 * basis states.
 *
 * Bug fix: the shift must be 1ULL, not 1UL.  On LLP64 platforms
 * (e.g. Windows/MSVC, which this file explicitly supports) unsigned long
 * is 32 bits, so 1UL << q is undefined behavior for q >= 32 and would
 * produce corrupted masks for high qubit indices; dst_mask is 64-bit
 * ITYPE and must hold the full-width value.
 */
void create_shift_mask_list_from_list_buf(const UINT* array, UINT count, UINT* dst_array, ITYPE* dst_mask) {
    memcpy(dst_array, array, sizeof(UINT)*count);
    sort_ui(dst_array, count);
    for (UINT i = 0; i < count; ++i) {
        dst_mask[i] = (1ULL << dst_array[i]) - 1;
    }
}
/*
 * Serial generic kernel: apply a (2^k x 2^k) dense matrix to the k target
 * qubits of a state vector of dimension dim.
 *
 * For every group of 2^k coupled amplitudes it gathers them (via the XOR
 * masks from create_matrix_mask_list), multiplies by the matrix into a
 * scratch buffer, and scatters the results back in place.
 */
void multi_qubit_dense_matrix_gate_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
    UINT sorted_targets[64];
    ITYPE lower_masks[64];
    create_shift_mask_list_from_list_buf(target_qubit_index_list, target_qubit_index_count, sorted_targets, lower_masks);

    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
    const ITYPE loop_dim = dim >> target_qubit_index_count;
    CTYPE* work = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim));

    for (ITYPE state_index = 0; state_index < loop_dim; ++state_index) {
        // Expand state_index into a basis index with a zero bit inserted
        // at every (sorted) target-qubit position.
        ITYPE base = state_index;
        for (UINT t = 0; t < target_qubit_index_count; ++t) {
            base = (base & lower_masks[t]) + ((base & (~lower_masks[t])) << 1);
        }
        // work = matrix * gathered amplitude sub-vector
        for (ITYPE row = 0; row < matrix_dim; ++row) {
            CTYPE acc = 0;
            for (ITYPE col = 0; col < matrix_dim; ++col) {
                acc += matrix[row*matrix_dim + col] * state[base ^ matrix_mask_list[col]];
            }
            work[row] = acc;
        }
        // Scatter the transformed amplitudes back into the state vector.
        for (ITYPE row = 0; row < matrix_dim; ++row) {
            state[base ^ matrix_mask_list[row]] = work[row];
        }
    }
    free(work);
    free((ITYPE*)matrix_mask_list);
}
#ifdef _OPENMP
// OpenMP kernel: same algorithm as the _single variant, but the outer loop
// over amplitude groups is split by hand into near-equal contiguous ranges,
// one per thread, and each thread gets its own scratch buffer inside
// buffer_list.  Each iteration reads and writes only its own group of
// 2^k amplitudes (base ^ matrix_mask_list[...]), so threads do not race.
void multi_qubit_dense_matrix_gate_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
UINT sort_array[64];
ITYPE mask_array[64];
create_shift_mask_list_from_list_buf(target_qubit_index_list, target_qubit_index_count, sort_array, mask_array);
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
const UINT thread_count = omp_get_max_threads();
// One scratch row of matrix_dim amplitudes per thread.
CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count));
// Static partition: the first `residual` threads take one extra iteration.
const ITYPE block_size = loop_dim / thread_count;
const ITYPE residual = loop_dim % thread_count;
#pragma omp parallel
{
UINT thread_id = omp_get_thread_num();
ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
CTYPE* buffer = buffer_list + thread_id * matrix_dim;
ITYPE state_index;
for (state_index = start_index; state_index < end_index; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; ++cursor) {
basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
}
free(buffer_list);
free((ITYPE*)matrix_mask_list);
}
#endif
/*
void multi_qubit_dense_matrix_gate_old_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// insert index
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim));
ITYPE state_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
free(buffer);
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
#ifdef _OPENMP
void multi_qubit_dense_matrix_gate_old_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// insert index
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
const UINT thread_count = omp_get_max_threads();
CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count));
const ITYPE block_size = loop_dim / thread_count;
const ITYPE residual = loop_dim % thread_count;
#pragma omp parallel
{
UINT thread_id = omp_get_thread_num();
ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
CTYPE* buffer = buffer_list + thread_id * matrix_dim;
ITYPE state_index;
for (state_index = start_index; state_index < end_index; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
}
free(buffer_list);
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
#endif
*/ |
ten_tusscher_2004_RS_CPU_epi_S0.c | // Original Ten Tusscher - Scenario 0
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S0.h"
// Report this model's constants to the caller-provided cell_model struct:
// the resting membrane potential (INITIAL_V) and the number of ODE state
// variables (NEQ), each copied only when the corresponding flag is set.
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
// Load the state vector sv with Elnaz's precomputed steady-state values
// for this scenario (used in place of the model's textbook default
// initial conditions).  Order matches RHS_cpu's state layout:
// V, m, h, j, Xr1, Xr2, Xs, s, r, d, f, fCa, g, Cai, CaSR, Nai, Ki.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    static const real steady_state[] = {
        -86.4172552153702, 0.00133233093318418, 0.775980725003160,
        0.775871451583533, 0.000178484465968596, 0.483518904573916,
        0.00297208335439809, 0.999998297825169, 1.98274727808946e-08,
        1.92952362196655e-05, 0.999768268008847, 1.00667048889468,
        0.999984854519288, 5.50424977684767e-05, 0.352485262813812,
        10.8673127043200, 138.860197273148
    };
    for (uint32_t eq = 0; eq < NEQ; eq++) {
        sv[eq] = steady_state[eq];
    }
}
// Integrate num_steps ODE steps for every requested cell, in parallel over
// cells.  If cells_to_solve is non-NULL it maps loop index -> state-vector
// id (sparse subset of cells); otherwise cells are solved densely by index.
// Each cell's state lives at sv + sv_id*NEQ and is advanced with its own
// stimulus current stim_currents[i].
// NOTE(review): `i` is a signed int while num_cells_to_solve is presumably
// an unsigned 32-bit count (type hidden by the SOLVE_MODEL_ODES_CPU macro)
// — the comparison mixes signedness; confirm against the macro definition.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Advance one cell's state vector sv by a single step of size dt under the
// given stimulus current.  RHS_cpu writes the *updated* state values (not
// derivatives) into its output array, so the result is copied straight
// back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real current_state[NEQ];
    real next_state[NEQ];

    for (int eq = 0; eq < NEQ; eq++) {
        current_state[eq] = sv[eq];
    }

    RHS_cpu(current_state, next_state, stim_current, dt);

    for (int eq = 0; eq < NEQ; eq++) {
        sv[eq] = next_state[eq];
    }
}
// Evaluate one time step of the ten Tusscher 2004 (epicardial) ventricular
// cell model.  Reads the 17-entry state vector sv, computes all membrane
// currents and ion-concentration updates, and writes the NEXT-step state
// into rDY_ — note that despite the name, rDY_ holds updated state values,
// not time derivatives: gates use an exponential update toward their
// steady state (x_inf - (x_inf - x)*exp(-dt/tau_x)), the membrane voltage
// uses a forward-Euler step, and concentrations are updated in place.
// The EPI/ENDO/MCELL compile-time flags select cell-type-specific
// conductances (Gks, Gto) and s/r gate kinetics.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
#ifdef EPI
real Gks=0.245;
#endif
#ifdef ENDO
real Gks=0.245;
#endif
#ifdef MCELL
real Gks=0.062;
#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
#ifdef EPI
real Gto=0.294;
#endif
#ifdef ENDO
real Gto=0.073;
#endif
#ifdef MCELL
real Gto=0.294;
#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// Reversal potentials (Nernst) for K+, Na+, Ks and Ca2+.
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// SR and cytosolic Ca2+ solved analytically against their buffers
// (quadratic in the buffered concentration).
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Exponential update toward steady state; unconditionally stable for
// each gate regardless of dt.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates may only decrease while the cell is depolarized
// (V > -37 mV): new value is rejected if it grew.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
ep.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB EP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
// program EMBAR
//---------------------------------------------------------------------
// M is the Log_2 of the number of complex pairs of uniform (0, 1) random
// numbers. MK is the Log_2 of the size of each batch of uniform random
// numbers. MK can be set for convenience on a given system, since it does
// not affect the results.
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "type.h"
#include "npbparams.h"
#include "randdp.h"
#include "timers.h"
#include "print_results.h"
#include "read_memory.h"
#define MAX(X,Y) (((X) > (Y)) ? (X) : (Y))
// MK: log2 of the batch size of random numbers; M comes from npbparams.h.
#define MK 16
// MM/NN: log2 and value of the number of batches (outer-loop trip count).
#define MM (M - MK)
#define NN (1 << MM)
// NK: numbers per batch (x holds 2*NK, i.e. NK (x,y) pairs).
#define NK (1 << MK)
// NQ: number of concentric-annulus bins for the Gaussian-pair tally.
#define NQ 10
#define EPSILON 1.0e-8
// A: linear-congruential multiplier; S: initial seed.
#define A 1220703125.0
#define S 271828183.0
// Per-thread random-number batch and per-thread annulus counts.
static double x[2*NK];
static double qq[NQ];
#pragma omp threadprivate(x,qq)
// Global (shared) annulus counts, accumulated from qq.
static double q[NQ];
// Checkpoint/restore bookkeeping used by the crash-recovery harness:
// restored by ReadVarriable() in main before the loop is resumed.
// NOTE(review): exact semantics of i1/i2 are not visible in this file;
// i3 is the resume index consumed by vranlc_temp — confirm against the
// read_memory module.
int i1=0, i2=0, i3=-1, k_temp = 0;
// Restored partial sums of the Gaussian-deviate tallies.
double ssx,ssy;
// Checkpoint-aware variant of the NPB vranlc generator: identical LCG to
// the comment below, except the output loop starts at global i3+1 instead
// of 0, so a run restored from a checkpoint can resume filling y[] where
// it left off.  After the first (possibly resumed) pass begins, i3 is
// reset to -1 so subsequent calls generate full batches again.
void vranlc_temp( int n, double *x, double a, double y[] )
{
//--------------------------------------------------------------------
//
// This routine generates N uniform pseudorandom double precision numbers in
// the range (0, 1) by using the linear congruential generator
//
// x_{k+1} = a x_k (mod 2^46)
//
// where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
// before repeating. The argument A is the same as 'a' in the above formula,
// and X is the same as x_0. A and X must be odd double precision integers
// in the range (1, 2^46). The N results are placed in Y and are normalized
// to be between 0 and 1. X is updated to contain the new seed, so that
// subsequent calls to VRANLC using the same arguments will generate a
// continuous sequence. If N is zero, only initialization is performed, and
// the variables X, A and Y are ignored.
//
// This routine is the standard version designed for scalar or RISC systems.
// However, it should produce the same results on any single processor
// computer with at least 48 mantissa bits in double precision floating point
// data. On 64 bit systems, double precision should be disabled.
//
//--------------------------------------------------------------------
// r23 = pow(0.5, 23.0);
//// pow(0.5, 23.0) = 1.1920928955078125e-07
// r46 = r23 * r23;
// t23 = pow(2.0, 23.0);
//// pow(2.0, 23.0) = 8.388608e+06
// t46 = t23 * t23;
const double r23 = 1.1920928955078125e-07;
const double r46 = r23 * r23;
const double t23 = 8.388608e+06;
const double t46 = t23 * t23;
double t1, t2, t3, t4, a1, a2, x1, x2, z;
int i;
//--------------------------------------------------------------------
// Break A into two parts such that A = 2^23 * A1 + A2.
//--------------------------------------------------------------------
t1 = r23 * a;
a1 = (int) t1;
a2 = a - t23 * a1;
//--------------------------------------------------------------------
// Generate N results. This loop is not vectorizable.
//--------------------------------------------------------------------
// Resume from checkpoint: i3 is -1 for a fresh run (start at 0); a
// restored value > -1 skips the entries already produced before the crash.
for ( i = i3+1; i < n; i++ ) {
//--------------------------------------------------------------------
// Break X into two parts such that X = 2^23 * X1 + X2, compute
// Z = A1 * X2 + A2 * X1 (mod 2^23), and then
// X = 2^23 * Z + A2 * X2 (mod 2^46).
//--------------------------------------------------------------------
t1 = r23 * (*x);
x1 = (int) t1;
x2 = *x - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int) (r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int) (r46 * t3) ;
*x = t3 - t46 * t4;
y[i] = r46 * (*x);
// Clear the resume index so the next call starts from 0 again.
// NOTE(review): this executes every iteration (redundantly after the
// first); presumably intentional for the crash harness — confirm.
i3 = -1;
}
return;
}
//--------------------------------------------------------------------
// Driver for the EP (embarrassingly parallel) benchmark, instrumented
// for crash/recovery experiments: before the main loop it restores the
// loop counter (k_temp), partial sums (ssx/ssy) and RNG bookkeeping
// (i1..i3) — presumably from a checkpoint via ReadVarriable() declared
// in read_memory.h; verify — and resumes the Gaussian-pair tally from
// iteration k_temp+1, writing SUCCESS/UNSUCCESS to a per-pid result
// file.
// NOTE(review): argv[1] is dereferenced without checking argc; the
// program crashes if the pid argument is missing.
//--------------------------------------------------------------------
int main(int argc, char *argv[])
{
pid = atoi(argv[1]);
printf("pid = %d\n",pid);
/*
crucial_data(x,"double",2*NK);
crucial_data(qq,"double",NQ);
crucial_data(q,"double",NQ);
*/
double Mops, t1, t2, t3, t4, x1, x2;
double sx, sy, tm, an, tt, gc;
double sx_verify_value, sy_verify_value, sx_err, sy_err;
int np;
int i, ik, kk, l, k, nit;
int k_offset, j;
logical verified, timers_enabled;
/*
consistent_data(&k,"int",1);
consistent_data(&i1,"int",1);
consistent_data(&i1,"int",1);
*/
double dum[3] = {1.0, 1.0, 1.0};
char size[16];
FILE *fp;
// Optional timers: enabled by the mere presence of a "timer.flag" file.
if ((fp = fopen("timer.flag", "r")) == NULL) {
timers_enabled = false;
} else {
timers_enabled = true;
fclose(fp);
}
//--------------------------------------------------------------------
// Because the size of the problem is too large to store in a 32-bit
// integer for some classes, we put it into a string (for printing).
// Have to strip off the decimal point put in there by the floating
// point print statement (internal file)
//--------------------------------------------------------------------
sprintf(size, "%15.0lf", pow(2.0, M+1));
j = 14;
if (size[j] == '.') j--;
size[j+1] = '\0';
printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - EP Benchmark\n");
printf("\n Number of random numbers generated: %15s\n", size);
printf("\n Number of available threads: %13d\n", omp_get_max_threads());
verified = false;
//--------------------------------------------------------------------
// Compute the number of "batches" of random number pairs generated
// per processor. Adjust if the number of processors does not evenly
// divide the total number
//--------------------------------------------------------------------
np = NN;
//--------------------------------------------------------------------
// Call the random number generator functions and initialize
// the x-array to reduce the effects of paging on the timings.
// Also, call all mathematical functions that are used. Make
// sure these initializations cannot be eliminated as dead code.
//--------------------------------------------------------------------
vranlc(0, &dum[0], dum[1], &dum[2]);
dum[0] = randlc(&dum[1], dum[2]);
#pragma omp parallel default(shared) private(i)
{
for (i = 0; i < 2 * NK; i++) {
x[i] = -1.0e99;
}
}
Mops = log(sqrt(fabs(MAX(1.0, 1.0))));
#pragma omp parallel
{
timer_clear(0);
if (timers_enabled) timer_clear(1);
if (timers_enabled) timer_clear(2);
}
timer_start(0);
t1 = A;
vranlc(0, &t1, A, x);
//--------------------------------------------------------------------
// Compute AN = A ^ (2 * NK) (mod 2^46).
//--------------------------------------------------------------------
t1 = A;
for (i = 0; i < MK + 1; i++) {
t2 = randlc(&t1, t1);
}
an = t1;
tt = S;
gc = 0.0;
sx = 0.0;
sy = 0.0;
for (i = 0; i < NQ; i++) {
q[i] = 0.0;
}
//--------------------------------------------------------------------
// Each instance of this loop may be performed independently. We compute
// the k offsets separately to take into account the fact that some nodes
// have more numbers to generate than others
//--------------------------------------------------------------------
k_offset = -1;
//FILE *testfile;
//testfile = fopen("resultfile1.out","w");
// Reference sums for the supported problem classes (selected by M).
verified = true;
if (M == 24) {
sx_verify_value = -3.247834652034740e+3;
sy_verify_value = -6.958407078382297e+3;
} else if (M == 25) {
sx_verify_value = -2.863319731645753e+3;
sy_verify_value = -6.320053679109499e+3;
} else if (M == 28) {
sx_verify_value = -4.295875165629892e+3;
sy_verify_value = -1.580732573678431e+4;
} else if (M == 30) {
sx_verify_value = 4.033815542441498e+4;
sy_verify_value = -2.660669192809235e+4;
} else if (M == 32) {
sx_verify_value = 4.764367927995374e+4;
sy_verify_value = -8.084072988043731e+4;
} else if (M == 36) {
sx_verify_value = 1.982481200946593e+5;
sy_verify_value = -1.020596636361769e+5;
} else if (M == 40) {
sx_verify_value = -5.319717441530e+05;
sy_verify_value = -3.688834557731e+05;
} else {
verified = false;
}
#pragma omp parallel default(shared) private(k,kk,t1,t2,t3,t4,i,ik,x1,x2,l)
{
for (i = 0; i < NQ; i++) {
qq[i] = 0.0;
}
//flush_whole_cache();
//start_crash();
// Register the addresses of all checkpointed objects and restore their
// values.  NOTE(review): addr/count_addr and ReadVarriable are declared
// elsewhere (presumably read_memory.h); every thread executes this
// registration — confirm that is intended rather than a single-thread
// section.
addr[count_addr++] = &x;
addr[count_addr++] = &qq;
addr[count_addr++] = &q;
addr[count_addr++] = &ssx;
addr[count_addr++] = &ssy;
addr[count_addr++] = &k_temp;
addr[count_addr++] = &i1;
addr[count_addr++] = &i2;
addr[count_addr++] = &i3;
ReadVarriable(addr,count_addr);
printf("k=%d\n",k_temp);
printf("i1=%d\n",i1);
printf("i2=%d\n",i2);
printf("i3=%d\n",i3);
printf("sx = %lf\n",ssx);
printf("sy = %lf\n",ssy);
// Seed the reduction with the restored partial sums.
sx = ssx;
sy = ssy;
//np = np + k_temp;
// Resume the batch loop after the last checkpointed iteration.
#pragma omp for reduction(+:sx,sy) nowait
for (k = k_temp+1; k <= np; k++) {
kk = k_offset + k;
t1 = S;
t2 = an;
/*int ijk=0;
if(k==k_temp+1){
fprintf(testfile,"before vranlc, t1 = %lf, A = %lf\n",t1, A);
for( ijk = 0; ijk<2*NK; ijk++)
{
fprintf(testfile,"%lf\n",x[ijk]);
}}*/
// Find starting seed t1 for this kk.
for (i = 1; i <= 100; i++) {
ik = kk / 2;
if ((2 * ik) != kk) t3 = randlc(&t1, t2);
if (ik == 0) break;
t3 = randlc(&t2, t2);
kk = ik;
}
//--------------------------------------------------------------------
// Compute uniform pseudorandom numbers.
//--------------------------------------------------------------------
if (timers_enabled) timer_start(2);
vranlc(2 * NK, &t1, A, x);
if (timers_enabled) timer_stop(2);
/*if(k==2981){
fprintf(testfile,"after vranlc, t1 = %lf, A = %lf\n",t1, A);
for(ijk = 0; ijk<2*NK; ijk++)
{
fprintf(testfile,"%lf\n",x[ijk]);
}}*/
//--------------------------------------------------------------------
// Compute Gaussian deviates by acceptance-rejection method and
// tally counts in concentri//square annuli. This loop is not
// vectorizable.
//--------------------------------------------------------------------
if (timers_enabled) timer_start(1);
for (i = 0; i < NK; i++) {
x1 = 2.0 * x[2*i] - 1.0;
x2 = 2.0 * x[2*i+1] - 1.0;
t1 = x1 * x1 + x2 * x2;
if (t1 <= 1.0) {
t2 = sqrt(-2.0 * log(t1) / t1);
t3 = (x1 * t2);
t4 = (x2 * t2);
l = MAX(fabs(t3), fabs(t4));
qq[l] = qq[l] + 1.0;
sx = sx + t3;
sy = sy + t4;
}
}
if (timers_enabled) timer_stop(1);
// Progress check against the reference sums on every iteration
// (debug aid for the recovery harness; reads the in-progress
// reduction variables).
//if (verified) {
sx_err = fabs((sx - sx_verify_value) / sx_verify_value);
sy_err = fabs((sy - sy_verify_value) / sy_verify_value);
//printf("sx_err = %25.15lE, sy_err =%25.15lE\n",sx_err, sy_err);
verified = ((sx_err <= EPSILON) && (sy_err <= EPSILON));
if(verified)
printf("k=%d, SUCCESS!\n",k);
//}
}
//end_crash();
// Merge the threadprivate tallies into the shared histogram.
for (i = 0; i < NQ; i++) {
#pragma omp atomic
q[i] += qq[i];
}
}
//fclose(testfile);
for (i = 0; i < NQ; i++) {
gc = gc + q[i];
}
timer_stop(0);
tm = timer_read(0);
nit = 0;
// NOTE(review): verification is forced on here regardless of whether a
// reference value exists for this M — an unsupported M would divide by
// an uninitialized sx_verify_value below.
verified = 1;
if (verified) {
sx_err = fabs((sx - sx_verify_value) / sx_verify_value);
sy_err = fabs((sy - sy_verify_value) / sy_verify_value);
printf("sx_err = %25.15lE, sy_err =%25.15lE\n",sx_err, sy_err);
verified = ((sx_err <= EPSILON) && (sy_err <= EPSILON));
}
// Write the per-pid pass/fail marker consumed by the test harness.
FILE *file;
char result_file[MAX_FILE_PATH] = "/home/cc/nvc/tests/recompute_result.out.jie";
sprintf(result_file + strlen(result_file), "%d", pid);
file = fopen(result_file,"w");
if(verified)
{
fprintf(file,"SUCCESS\n");
}
else{
fprintf(file,"UNSUCCESS\n");
}
fclose(file);
Mops = pow(2.0, M+1) / tm / 1000000.0;
printf("\nEP Benchmark Results:\n\n");
printf("CPU Time =%10.4lf\n", tm);
printf("N = 2^%5d\n", M);
printf("No. Gaussian Pairs = %15.0lf\n", gc);
printf("Sums = %25.15lE %25.15lE\n", sx, sy);
printf("Counts: \n");
for (i = 0; i < NQ; i++) {
printf("%3d%15.0lf\n", i, q[i]);
}
print_results("EP", CLASS, M+1, 0, 0, nit,
tm, Mops,
"Random numbers generated",
verified, NPBVERSION, COMPILETIME, CS1,
CS2, CS3, CS4, CS5, CS6, CS7);
if (timers_enabled) {
if (tm <= 0.0) tm = 1.0;
tt = timer_read(0);
printf("\nTotal time: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
tt = timer_read(1);
printf("Gaussian pairs: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
tt = timer_read(2);
printf("Random numbers: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
}
return 0;
}
|
Lyra2.c | /**
* Implementation of the Lyra2 Password Hashing Scheme (PHS).
*
* Author: The Lyra PHC team (http://www.lyra2.net/) -- 2015.
*
* This software is hereby placed in the public domain.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <omp.h>
#include "Lyra2.h"
#include "Sponge.h"
#include "sse/Lyra2.h"
/**
* Executes Lyra2 based on the G function from Blake2b or BlaMka. The number of columns of the memory matrix is set to nCols = N_COLS.
* This version supports salts and passwords whose combined length is smaller than the size of the memory matrix,
* (i.e., (nRows x nCols x b) bits, where "b" is the underlying sponge's bitrate). In this implementation, the "params"
* is composed by all integer parameters (treated as type "unsigned int") in the order they are provided, plus the value
* of nCols, (i.e., params = kLen || pwdlen || saltlen || timeCost || nRows || nCols).
* In case of parallel version, there are two more "params": total of threads and thread number (nPARALLEL || threadNumber).
*
* @param out The derived key to be output by the algorithm
* @param outlen Desired key length
* @param in User password
* @param inlen Password length
* @param salt Salt
* @param saltlen Salt length
* @param t_cost Parameter to determine the processing time (T)
* @param m_cost Memory cost parameter (defines the number of rows of the memory matrix, R)
*
* @return 0 if the key is generated correctly; -1 if there is an error (usually due to lack of memory for allocation)
*/
/*
 * PHS() -- the standard Password Hashing Scheme entry point required by the
 * PHC interface. It is a thin adapter: all real work is delegated to LYRA2()
 * with the number of matrix columns fixed at the compile-time default N_COLS.
 * Returns whatever LYRA2() returns (0 on success, -1 on allocation failure).
 */
int PHS(void *out, size_t outlen, const void *in, size_t inlen, const void *salt, size_t saltlen, unsigned int t_cost, unsigned int m_cost) {
    //Forward every argument unchanged; only the column count is supplied here.
    return LYRA2(out, outlen, in, inlen, salt, saltlen, t_cost, m_cost, N_COLS);
}
#if (nPARALLEL == 1)
/**
* Executes Lyra2 based on the G function from Blake2b or BlaMka. This version supports salts and passwords
* whose combined length is smaller than the size of the memory matrix, (i.e., (nRows x nCols x b) bits,
* where "b" is the underlying sponge's bitrate). In this implementation, the "params" is composed by all
* integer parameters (treated as type "unsigned int") in the order they are provided, plus the value
* of nCols, (i.e., params = kLen || pwdlen || saltlen || timeCost || nRows || nCols).
*
* @param K The derived key to be output by the algorithm
* @param kLen Desired key length
* @param pwd User password
* @param pwdlen Password length
* @param salt Salt
* @param saltlen Salt length
* @param timeCost Parameter to determine the processing time (T)
* @param nRows Number or rows of the memory matrix (R)
* @param nCols Number of columns of the memory matrix (C)
*
* @return 0 if the key is generated correctly; -1 if there is an error (usually due to lack of memory for allocation)
*/
int LYRA2(void *K, unsigned int kLen, const void *pwd, unsigned int pwdlen, const void *salt, unsigned int saltlen, unsigned int timeCost, unsigned int nRows, unsigned int nCols) {
    //============================= Basic variables ============================//
    int64_t gap = 1;        //Modifier to the step, assuming the values 1 or -1
    uint64_t step = 1;      //Visitation step (used during Setup to dictate the sequence in which rows are read)
    uint64_t window = 2;    //Visitation window (used to define which rows can be revisited during Setup)
    uint64_t sqrt = 2;      //Square of window (i.e., square(window)), when a window is a square number;
                            //otherwise, sqrt = 2*square(window/2)
    uint64_t row0 = 3;      //row0: sequentially written during Setup; randomly picked during Wandering
    uint64_t prev0 = 2;     //prev0: stores the previous value of row0
    uint64_t row1 = 1;      //row1: revisited during Setup, and then read [and written]; randomly picked during Wandering
    uint64_t prev1 = 0;     //prev1: stores the previous value of row1
    uint64_t i;             //auxiliary iteration counter
    //==========================================================================/

    //========== Initializing the Memory Matrix and pointers to it =============//
    //Tries to allocate enough space for the whole memory matrix
    i = (uint64_t) ((uint64_t) nRows * (uint64_t) ROW_LEN_BYTES);
    uint64_t *wholeMatrix = malloc(i);
    if (wholeMatrix == NULL) {
        return -1;
    }
    //Allocates pointers to each row of the matrix
    uint64_t **memMatrix = malloc(nRows * sizeof (uint64_t*));
    if (memMatrix == NULL) {
        free(wholeMatrix);  //BUG FIX: wholeMatrix was previously leaked on this error path
        return -1;
    }
    //Places the pointers in the correct positions
    uint64_t *ptrWord = wholeMatrix;
    for (i = 0; i < nRows; i++) {
        memMatrix[i] = ptrWord;
        ptrWord += ROW_LEN_INT64;
    }
    //==========================================================================/

    //============= Padding (password + salt + params) with 10*1 ===============//
    //OBS.:The memory matrix will temporarily hold the password: not for saving memory,
    //but this ensures that the password copied locally will be overwritten as soon as possible
    //First, we clean enough blocks for the password, salt, params and padding
    //Change the ''6'' if different amounts of parameters were passed
    uint64_t nBlocksInput = ((saltlen + pwdlen + 6 * sizeof (int)) / BLOCK_LEN_BLAKE2_SAFE_BYTES) + 1;
    byte *ptrByte = (byte*) wholeMatrix;
    memset(ptrByte, 0, nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES);
    //Prepends the password
    memcpy(ptrByte, pwd, pwdlen);
    ptrByte += pwdlen;
    //Concatenates the salt
    memcpy(ptrByte, salt, saltlen);
    ptrByte += saltlen;
    //Concatenates the params: every integer passed as parameter, in the order they are provided by the interface
    memcpy(ptrByte, &kLen, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &pwdlen, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &saltlen, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &timeCost, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &nRows, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &nCols, sizeof (int));
    ptrByte += sizeof (int);
    //Now comes the padding
    *ptrByte = 0x80;               //first byte of padding: right after the password
    ptrByte = (byte*) wholeMatrix; //resets the pointer to the start of the memory matrix
    ptrByte += nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES - 1; //sets the pointer to the correct position: end of incomplete block
    *ptrByte ^= 0x01;              //last byte of padding: at the end of the last incomplete block
    //==========================================================================/

    //======================= Initializing the Sponge State ====================//
    //Sponge state: 16 uint64_t, BLOCK_LEN_INT64 words of them for the bitrate (b) and the remainder for the capacity (c)
    uint64_t *state = malloc(16 * sizeof (uint64_t));
    if (state == NULL) {
        free(memMatrix);    //BUG FIX: both earlier allocations were leaked on this error path
        free(wholeMatrix);
        return -1;
    }
    initState(state);
    //==========================================================================/

    //============= Absorbing the input data with the sponge ===============//
    //Absorbing salt, password and params: this is the only place in which the block length is hard-coded to 512 bits, for compatibility with Blake2b and BlaMka
    ptrWord = wholeMatrix;
    for (i = 0; i < nBlocksInput; i++) {
        absorbBlockBlake2Safe(state, ptrWord); //absorbs each block of pad(pwd || salt || params)
        ptrWord += BLOCK_LEN_BLAKE2_SAFE_INT64; //goes to next block of pad(pwd || salt || params)
    }
    //================================================================================/

    //================================ Setup Phase ==================================//
    //==Initializes a (nRows x nCols) memory matrix, it's cells having b bits each)==//
    //Initializes M[0]
    reducedSqueezeRow0(state, memMatrix[0]); //The locally copied password is most likely overwritten here
    //Initializes M[1]
    reducedDuplexRow1and2(state, memMatrix[0], memMatrix[1]);
    //Initializes M[2]
    reducedDuplexRow1and2(state, memMatrix[1], memMatrix[2]);

    //Filling Loop
    for (row0 = 3; row0 < nRows; row0++) {
        //Performs a reduced-round duplexing operation over "M[row1][col] [+] M[prev0][col] [+] M[prev1][col]", filling M[row0] and updating M[row1]
        //M[row0][N_COLS-1-col] = M[prev0][col] XOR rand;
        //M[row1][col] = M[row1][col] XOR rot(rand)                    rot(): right rotation by 'omega' bits (e.g., 1 or more words)
        reducedDuplexRowFilling(state, memMatrix[row1], memMatrix[prev0], memMatrix[prev1], memMatrix[row0]);

        //Updates the "prev" indices: the rows more recently updated
        prev0 = row0;
        prev1 = row1;

        //updates the value of row1: deterministically picked, with a variable step
        row1 = (row1 + step) & (window - 1);

        //Checks if all rows in the window where visited.
        if (row1 == 0) {
            window *= 2;       //doubles the size of the re-visitation window
            step = sqrt + gap; //changes the step: approximately doubles its value
            gap = -gap;        //inverts the modifier to the step
            if (gap == -1) {
                sqrt *= 2;     //Doubles sqrt every other iteration
            }
        }
    }

    //============================ Wandering Phase =============================//
    //=====Iteratively overwrites pseudorandom cells of the memory matrix=======//
    //Visitation Loop
    for (i = 0; i < timeCost * nRows; i++) {
        //Selects a pseudorandom indices row0 and row1
        //------------------------------------------------------------------------------------------
        /*(USE THIS IF nRows IS A POWER OF 2)*/
        //row0 = ((uint64_t)state[0]) & (nRows-1);
        //row1 = ((uint64_t)state[2]) & (nRows-1);
        /*(USE THIS FOR THE "GENERIC" CASE)*/
        row0 = ((uint64_t) state[0]) % nRows; //row0 = lsw(rand) mod nRows
        row1 = ((uint64_t) state[2]) % nRows; //row1 = lsw(rot(rand)) mod nRows
        //we rotate 2 words for compatibility with the SSE implementation

        //Performs a reduced-round duplexing operation over "M[row0][col] [+] M[row1][col] [+] M[prev0][col0] [+] M[prev1][col1], updating both M[row0] and M[row1]
        //M[row0][col] = M[row0][col] XOR rand;
        //M[row1][col] = M[row1][col] XOR rot(rand)                    rot(): right rotation by 'omega' bits (e.g., 1 or more words)
        reducedDuplexRowWandering(state, memMatrix[row0], memMatrix[row1], memMatrix[prev0], memMatrix[prev1]);

        //update prev's: they now point to the last rows ever updated
        prev0 = row0;
        prev1 = row1;
    }
    //==========================================================================/

    //============================ Wrap-up Phase ===============================//
    //========================= Output computation =============================//
    //Absorbs one last block of the memory matrix with the full-round sponge
    absorbColumn(state, memMatrix[row0]);
    //Squeezes the key with the full-round sponge
    squeeze(state, K, kLen);
    //==========================================================================/

    //========================= Freeing the memory =============================//
    free(memMatrix);
    free(wholeMatrix);
    //Wiping out the sponge's internal state before freeing it
    //NOTE(review): a plain memset before free() may be elided by the optimizer;
    //consider memset_explicit/explicit_bzero where available.
    memset(state, 0, 16 * sizeof (uint64_t));
    free(state);
    //==========================================================================/

    return 0;
}
#endif
#if (nPARALLEL > 1)
/**
* Executes Lyra2 based on the G function from Blake2b or BlaMka. This version supports salts and passwords
* whose combined length is smaller than the size of the memory matrix, (i.e., (nRows x nCols x b) bits,
* where "b" is the underlying sponge's bitrate). In this implementation, the "params" is composed by all
* integer parameters (treated as type "unsigned int") in the order they are provided, plus the value
* of nCols, (i.e., params = kLen || pwdlen || saltlen || timeCost || nRows || nCols || nPARALLEL || threadNumber).
*
* @param K The derived key to be output by the algorithm
* @param kLen Desired key length
* @param pwd User password
* @param pwdlen Password length
* @param salt Salt
* @param saltlen Salt length
* @param timeCost Parameter to determine the processing time (T)
* @param nRows Number or rows of the memory matrix (R)
* @param nCols Number of columns of the memory matrix (C)
*
* @return 0 if the key is generated correctly; -1 if there is an error (usually due to lack of memory for allocation)
*/
int LYRA2(void *K, unsigned int kLen, const void *pwd, unsigned int pwdlen, const void *salt, unsigned int saltlen, unsigned int timeCost, unsigned int nRows, unsigned int nCols) {
    //============================= Basic variables ============================//
    int64_t i, j; //auxiliary iteration counters (used to XOR the per-thread keys)
    //==========================================================================/

    //========== Initializing the Memory Matrix and pointers to it =============//
    //Allocates pointers to each row of the matrix
    uint64_t **memMatrix = malloc(nRows * sizeof (uint64_t*));
    if (memMatrix == NULL) {
        return -1;
    }
    //Allocates pointers to each thread's slice of the derived key
    unsigned char **pKeys = malloc(nPARALLEL * sizeof (unsigned char*));
    if (pKeys == NULL) {
        free(memMatrix); //BUG FIX: memMatrix was previously leaked on this error path
        return -1;
    }

#if _OPENMP <= 201107 //OpenMP 3.X or less
#pragma omp parallel num_threads(nPARALLEL) default(none) /*private(pwd)*/ shared(memMatrix, pKeys, pwd, pwdlen, salt, saltlen, nRows, nCols, kLen, timeCost)
#endif // _OPENMP
#if _OPENMP > 201107 //OpenMP 4.0
#pragma omp parallel proc_bind(spread) num_threads(nPARALLEL) default(none) /*private(pwd)*/ shared(memMatrix, pKeys, pwd, pwdlen, salt, saltlen, nRows, nCols, kLen, timeCost)
#endif // _OPENMP
    {
        //============================= Basic threads variables ============================//
        int64_t gap = 1;       //Modifier to the step, assuming the values 1 or -1
        uint64_t step = 1;     //Visitation step (used during Setup and Wandering phases)
        uint64_t window = 2;   //Visitation window (used to define which rows can be revisited during Setup)
        uint64_t sync = 4;     //Synchronize counter
        uint64_t sqrt = 2;     //Square of window (i.e., square(window)), when a window is a square number;
                               //otherwise, sqrt = 2*square(window/2)
        uint64_t row0 = 3;     //row0: sequentially written during Setup; randomly picked during Wandering
        uint64_t prev0 = 2;    //prev0: stores the previous value of row0
        uint64_t rowP = 1;     //rowP: revisited during Setup, and then read [and written]; randomly picked during Wandering
        uint64_t prevP = 0;    //prevP: stores the previous value of rowP
        uint64_t threadNumber = 0;
        uint64_t iP;
        uint64_t jP;           //Starts with threadNumber.
        uint64_t kP;
        uint64_t wCont;
        uint64_t sizeSlicedRows;
        uint64_t off0;
        uint64_t offP;
        //==========================================================================/

        //========================== BootStrapping Phase ==========================//
        // Size of each chunk that each thread will work with
        sizeSlicedRows = nRows / nPARALLEL;
        // Thread index:
        threadNumber = omp_get_thread_num();

        uint64_t sliceStart = threadNumber * sizeSlicedRows;
        uint64_t halfSlice = sizeSlicedRows / 2;

        iP = (uint64_t) ((uint64_t) sizeSlicedRows * (uint64_t) ROW_LEN_BYTES);
        uint64_t *threadSliceMatrix = malloc(iP);
        if (threadSliceMatrix == NULL) {
            printf("Error: unable to allocate memory (nRows too large?)\n");
            exit(EXIT_FAILURE);
        }
        //Places the pointers in the correct positions
        uint64_t *ptrWord = threadSliceMatrix;
        for (kP = 0; kP < sizeSlicedRows; kP++) {
            memMatrix[threadNumber * sizeSlicedRows + kP] = ptrWord;
            ptrWord += ROW_LEN_INT64;
        }

        unsigned char *threadKey = malloc(kLen);
        if (threadKey == NULL) {
            exit(EXIT_FAILURE);
        }
        //Places the pointers in the correct positions
        pKeys[threadNumber] = threadKey;
        //==========================================================================/

        //============= Padding (password + salt + params) with 10*1 ===============//
        //OBS.:The memory matrix will temporarily hold the password: not for saving memory,
        //but this ensures that the password copied locally will be overwritten as soon as possible
        //First, we clean enough blocks for the password, salt, params and padding
        //Change the ''8'' if different amounts of parameters were passed
        uint64_t nBlocksInput = ((saltlen + pwdlen + 8 * sizeof (int)) / BLOCK_LEN_BLAKE2_SAFE_BYTES) + 1;
        byte *ptrByte = (byte*) threadSliceMatrix;
        memset(ptrByte, 0, nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES);
        //Prepends the password
        memcpy(ptrByte, pwd, pwdlen);
        ptrByte += pwdlen;
        //Concatenates the salt
        memcpy(ptrByte, salt, saltlen);
        ptrByte += saltlen;
        //Concatenates the params: every integer passed as parameter, in the order they are provided by the interface
        memcpy(ptrByte, &kLen, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &pwdlen, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &saltlen, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &timeCost, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &nRows, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &nCols, sizeof (int));
        ptrByte += sizeof (int);
        int p = nPARALLEL;
        memcpy(ptrByte, &p, sizeof (int));
        ptrByte += sizeof (int);
        //BUG FIX: threadNumber is a uint64_t; memcpy'ing sizeof(int) bytes directly
        //from it only yields the intended value on little-endian hosts. Copy a true
        //int instead (byte-identical output on little-endian, correct on big-endian).
        int tN = (int) threadNumber;
        memcpy(ptrByte, &tN, sizeof (int));
        ptrByte += sizeof (int);
        //Now comes the padding
        *ptrByte = 0x80;                     //first byte of padding: right after the password
        ptrByte = (byte*) threadSliceMatrix; //resets the pointer to the start of the memory matrix
        ptrByte += nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES - 1; //sets the pointer to the correct position: end of incomplete block
        *ptrByte ^= 0x01;                    //last byte of padding: at the end of the last incomplete block
        //==========================================================================/

        //============== Initializing the Sponge State =============/
        //Sponge state: 16 uint64_t, BLOCK_LEN_INT64 words of them for the bitrate (b) and the remainder for the capacity (c)
        //Thread State
        uint64_t *threadState = malloc(16 * sizeof (uint64_t));
        if (threadState == NULL) {
            exit(EXIT_FAILURE);
        }
        initState(threadState);
        //==========================================================================/

        //============= Absorbing the input data with the sponge ===============//
        //Absorbing salt, password and params: this is the only place in which the block length is hard-coded to 512 bits, for compatibility with Blake2b and BlaMka
        ptrWord = threadSliceMatrix;
        for (kP = 0; kP < nBlocksInput; kP++) {
            absorbBlockBlake2Safe(threadState, ptrWord); //absorbs each block of pad(pwd || salt || params)
            ptrWord += BLOCK_LEN_BLAKE2_SAFE_INT64;      //goes to next block of pad(pwd || salt || params)
        }
        //================================================================================/

        //================================ Setup Phase ==================================//
        //==Initializes a (nRows x nCols) memory matrix, it's cells having b bits each)==//
        //Initializes M[0]
        reducedSqueezeRow0(threadState, memMatrix[sliceStart]); //The locally copied password is most likely overwritten here
        //Initializes M[1]
        reducedDuplexRow1and2(threadState, memMatrix[sliceStart], memMatrix[sliceStart + 1]);
        //Initializes M[2]
        reducedDuplexRow1and2(threadState, memMatrix[sliceStart + 1], memMatrix[sliceStart + 2]);

        jP = threadNumber;

        //Filling Loop
        for (row0 = 3; row0 < sizeSlicedRows; row0++) {
            //Performs a reduced-round duplexing operation over "Mj[rowP][col] [+] Mi[prev0][col] [+] Mj[prevP][col]", filling Mi[row0] and updating Mj[rowP]
            //Mi[row0][N_COLS-1-col] = Mi[prev0][col] XOR rand;
            //Mj[rowP][col] = Mj[rowP][col] XOR rot(rand)                    rot(): right rotation by 'omega' bits (e.g., 1 or more words)
            reducedDuplexRowFilling(threadState, memMatrix[jP * sizeSlicedRows + rowP], memMatrix[sliceStart + prev0], memMatrix[jP * sizeSlicedRows + prevP], memMatrix[sliceStart + row0]);

            //Updates the "prev" indices: the rows more recently updated
            prev0 = row0;
            prevP = rowP;

            //updates the value of rowP: deterministically picked, with a variable step
            rowP = (rowP + step) & (window - 1);

            //Checks if all rows in the window where visited.
            if (rowP == 0) {
                window *= 2;       //doubles the size of the re-visitation window
                step = sqrt + gap; //changes the step: approximately doubles its value
                gap = -gap;        //inverts the modifier to the step
                if (gap == -1) {
                    sqrt *= 2;     //Doubles sqrt every other iteration
                }
            }

            //Synchronize threads and change the slices
            if (row0 == sync) {
                sync += sqrt / 2;          //increment synchronize counter
                jP = (jP + 1) % nPARALLEL; //change the visitation thread
#pragma omp barrier
            }
        }

        // Needs all matrix done before starting Wandering Phase.
#pragma omp barrier

        //============================ Wandering Phase =============================//
        //=====Iteratively overwrites pseudorandom cells of the memory matrix=======//
        window = halfSlice;
        sync = sqrt;
        off0 = 0;
        offP = window;
        uint64_t offTemp;

        //Visitation Loop
        for (wCont = 0; wCont < timeCost * sizeSlicedRows; wCont++) {
            //Selects a pseudorandom indices row0 and rowP
            //------------------------------------------------------------------------------------------
            /*(USE THIS IF window IS A POWER OF 2)*/
            //row0 = off0 + (((uint64_t)threadState[0]) & (window-1));
            //rowP = offP + (((uint64_t)threadState[2]) & (window-1));
            /*(USE THIS FOR THE "GENERIC" CASE)*/
            row0 = off0 + (((uint64_t) threadState[0]) % window); //row0 = off0 + (lsw(rand) mod window)
            rowP = offP + (((uint64_t) threadState[2]) % window); //row1 = offP + (lsw(rot(rand)) mod window)
            //we rotate 2 words for compatibility with the SSE implementation

            //Selects a pseudorandom indices jP
            jP = ((uint64_t) threadState[4]) % nPARALLEL; //jP = lsw(rot^2(rand)) mod nPARALLEL
            //we rotate 2 words for compatibility with the SSE implementation

            //Performs a reduced-round duplexing operation over "Mi[row0][col] [+] Mj[rowP][col] [+] Mi[prev0][col0]", updating Mi[row0]
            //Mi[row0][col] = Mi[row0][col] XOR rand;
            reducedDuplexRowWanderingParallel(threadState, memMatrix[sliceStart + row0], memMatrix[jP * sizeSlicedRows + rowP], memMatrix[sliceStart + prev0]);

            //update prev: they now point to the last rows ever updated
            prev0 = row0;

            //Synchronize threads and change the slices
            if (wCont == sync) {
                sync += sqrt;
                offTemp = off0;
                off0 = offP;
                offP = offTemp;
#pragma omp barrier
            }
        }
#pragma omp barrier
        //==========================================================================/

        //============================ Wrap-up Phase ===============================//
        //========================= Output computation =============================//
        //Absorbs one last block of the memory matrix with the full-round sponge
        absorbColumn(threadState, memMatrix[sliceStart + row0]);
        //Squeezes the key
        squeeze(threadState, threadKey, kLen);

        //========================= Freeing the thread memory =============================//
        free(threadSliceMatrix);
        //Wiping out the sponge's internal state before freeing it
        //NOTE(review): a plain memset before free() may be elided by the optimizer;
        //consider memset_explicit/explicit_bzero where available.
        memset(threadState, 0, 16 * sizeof (uint64_t));
        free(threadState);
    } // Parallelism End

    // XORs all Keys: the final key is the XOR of every thread's partial key
    for (i = 1; i < nPARALLEL; i++) {
        for (j = 0; j < kLen; j++) {
            pKeys[0][j] ^= pKeys[i][j];
        }
    }

    // Returns in the correct variable
    memcpy(K, pKeys[0], kLen);

    //========================= Freeing the memory =============================//
    free(memMatrix);
    //Free each thread Key
    for (i = 0; i < nPARALLEL; i++) {
        free(pKeys[i]);
    }
    //Free the pointers to allKeys
    free(pKeys);
    //==========================================================================/

    return 0;
}
#endif |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.