source
stringlengths
3
92
c
stringlengths
26
2.25M
spikefit.c
/* C library for spike fitting, a re-implementation of Matlab fitting. See
   spikefit.mw for any missing documentation, and MEX calling setup.
   Barnett 2/12/15 start.

   Review notes (this revision):
   - fitgreedy: freed the locflag workspace `srt` (was leaked on every call),
     and initialize the pointer to NULL so an indeterminate pointer value is
     never passed to nll/fitonesp when locflag==0.
   - spikemod: ilo/ihi are now zero-initialized so iran[] is well defined
     even when Ns==0 (previously read uninitialized in that case).
   No interfaces or numerical behavior changed. */

#include "spikefit.h"

/*----------------------------------------------------------------------------
  SPIKEMOD - evaluate the forward spike model into the signal array F.
  Algorithm is synthesis/spikemodel with 1-indexing changed to 0-indexing.
  Inputs:
   W     - (M*T*K) waveforms 3d array stored contiguously, indexed by M fast,
           T medium, K slowest.
   M,T,K - number of channels, waveform time points (upsampled), spike types.
   fac   - (integer) upsampling ratio.
   Ns    - number of spikes.
   l,t,a - pointers to Ns-length arrays of model params (label, time, ampl).
           Labels l are 1-indexed (1..K).
   Nt    - number of time points in signal.
   subF  - flag: 0 zeros F first and adds spikes; 1 subtracts spikes from
           whatever was in F already; 2 assumes Ns=1, overwriting only supp(F).
  Outputs:
   F     - (M*Nt) signal array, M fast, Nt slow.
   iran  - length-2 integer array [ilo,ihi) for the most recent spike added.
  Barnett 2/13/15. Better scaling for big clips (limiting iput) 3/19/15.
  subF=2 case where only writes to one-spike supp(F), 4/30/15. */
void spikemod(double* W, int M, int T, int K, int fac, int Ns, int* l, double* t, double* a, int Nt, double* F, int subF, int* iran)
{
  int i, s, iput, m, icenW = (T-1)/2;       /* icenW: waveform center, 0-indexed */
  int ilo = 0, ihi = 0;                     /* init so iran[] is defined even if Ns==0 */
  double al[MAXSPIKESPEREVENT];             /* local copy of ampls a */
  int pad = 1;                              /* padding for integer rounding safety */

  if (subF==1)
    for (s=0; s<Ns; ++s) al[s] = -a[s];     /* negate ampls: subtraction mode */
  else {
    for (s=0; s<Ns; ++s) al[s] = a[s];      /* usual ampls */
    if (subF==0)
      for (i=0; i<M*Nt; ++i) F[i] = 0.0;    /* initialize all of F to zero */
    else if (subF==2 && Ns==1) {            /* init F=0 only in 1-spike support */
      ilo = MAX(0,floor(t[0] - icenW/(double)fac - pad));
      ihi = MIN(Nt,ceil(t[0] + icenW/(double)fac + pad));
      for (i=ilo; i<ihi; ++i)
        for (m=0; m<M; ++m)
          F[i*M+m] = 0.0;                   /* zero only the one-spike support of F */
    }
  }

  for (s=0; s<Ns; ++s) {                    /* loop over spikes */
    /* range of output indices to sweep for this spike... */
    ilo = MAX(0,floor(t[s] - icenW/(double)fac - pad));
    ihi = MIN(Nt,ceil(t[s] + icenW/(double)fac + pad));
    for (iput=ilo; iput<ihi; ++iput) {
      int iget = round(icenW + fac*(iput - t[s]));  /* t starts at zero */
      if (iget>=0 && iget<T) {              /* only read values lying in waveform */
        int oput = iput*M;                  /* write offset for the channel loop */
        int oget = (iget + (l[s]-1)*T)*M;   /* read offset (this time & type k) */
        for (m=0; m<M; ++m)                 /* loop over channels, copy col vec */
          F[oput+m] = F[oput+m] + al[s] * W[oget+m];  /* use local ampl */
      }
    }
  }
  iran[0] = ilo; iran[1] = ihi;             /* output signal index range of last spike */
}

/*----------------------------------------------------------------------------
  NLL - return negative log likelihood for spike model w/ iid Gaussian noise.
  Only one type of simple noise model for now.
  Inputs:
   W       - (M*T*K) waveforms 3d array stored contiguously.
   M,T,K   - number of channels, waveform time points (upsampled), spike types.
   fac     - (integer) upsampling ratio.
   Ns      - number of spikes to read from model params.
   l,t,a   - pointers to Ns-length arrays of model params.
   Nt      - number of time points in signal.
   Y       - (M*Nt) signal array stored contiguously.
   F       - (M*Nt) workspace; returns the model signal (unless locflag=1,
             in which case F is garbage outside supp(F)).
   eta     - noise sqrt(variance), only aspect of noise model used for now.
   locflag - 0: compute NLL the usual way; 1: local update using srt;
             2: usual way, but also initialize srt.
   srt     - size-Nt array of sum squared residual Y-F per time point;
             read when locflag==1, written when locflag==2, unused otherwise.
  Output: minus log (likelihood of signal Y given spike model F).
  Barnett 2/15/15 */
double nll(double* W, int M, int T, int K, int fac, int Ns, int* l, double* t, double* a, int Nt, double* Y, double* F, double eta, int locflag, double* srt)
{
  int i, iran[2], m;
  double x, J = 0.0;                        /* J = NLL accumulator */

  if (locflag==0) {                         /* standard NLL eval */
    spikemod(W, M, T, K, fac, Ns, l, t, a, Nt, F, 0, iran);  /* subF=0 init's F */
    for (i=0; i<M*Nt; ++i) {                /* l_2^2 norm over channels & times */
      x = Y[i]-F[i];
      J += x*x;
    }
  } else if (locflag==2) {                  /* as locflag=0 but also fills srt */
    spikemod(W, M, T, K, fac, Ns, l, t, a, Nt, F, 0, iran);  /* subF=0 init's F */
    for (i=0; i<Nt; ++i) {                  /* l_2^2 norm over times... */
      double srti = 0.0;
      for (m=0; m<M; ++m) {                 /* ...and channels */
        x = Y[i*M+m]-F[i*M+m];
        srti += x*x;
      }
      srt[i] = srti;                        /* cache per-time-point residual */
      J += srti;
    }
  } else if (locflag==1) {                  /* local update: reuse srt off-support */
    spikemod(W, M, T, K, fac, Ns, l, t, a, Nt, F, 2, iran);  /* subF=2: no full init */
    for (i=0; i<iran[0]; ++i)               /* sum cached srt before supp(F)... */
      J += srt[i];
    for (i=iran[1]; i<Nt; ++i)              /* ...and after supp(F) */
      J += srt[i];
    for (i=iran[0]; i<iran[1]; ++i) {       /* fresh l_2^2 norm over supp(F) only */
      double srti = 0.0;
      for (m=0; m<M; ++m) {                 /* channels */
        x = Y[i*M+m]-F[i*M+m];
        srti += x*x;
      }
      J += srti;
    }
  }
  J /= 2.0*eta*eta;                         /* overall noise scaling */
  return J;
}

/*----------------------------------------------------------------------------
  FITONESP - fit one spike, based on stageC_fitting/fitonespike.m
  Inputs:
   W       - (M*T*K) waveforms 3d array stored contiguously.
   M,T,K   - number of channels, waveform time points (upsampled), spike types.
   fac     - (integer) upsampling ratio.
   Nt      - number of time points in signal.
   Y       - (M*Nt) signal array stored contiguously.
   eta     - noise sqrt(variance), only aspect of noise model used for now.
   tpad    - (double) padding in time points to stop spike bumping into the end.
   nlps    - (double size K) -log prior probabilities for each spike type 1..K.
   locflag - 0: compute NLL the usual way; 1: use local update via srt.
   srt     - size-Nt array of squared resid per time pt; updated when locflag.
  Outputs:
   lb (int), tb (double), ab (double) - 1-spike best model params (lb in 1..K).
   Fb (M*Nt double) - model signal at best-fit params, stored contiguously.
   Jbest - best-fit objective func = neg log posterior.
  Algorithm: exhaustive search of max posterior prob over time t and label l.
  Doesn't store J's for all param choices (as would be needed for
  marginalization), just the running best one.
  todo: check only on original sample grid, then upsample near-minimal J only.
  todo: ampl fitting, via (d/da)NLL explicit formula.
  todo: Bayesian evidence, or analytic amplitude marginalization.
  Barnett 2/13/15-2/15/15. Fix l is 1-indexed not 0-indexed bug, 2/18/15.
  log priors 3/12/15. locflag & srt 4/30/15 */
void fitonesp(double* W, int M, int T, int K, int fac, int* lb, double* tb, double* ab, int Nt, double* Y, double eta, double tpad, double* Fb, double* Jbest, double* nlps, int locflag, double* srt)
{
  int l, i, m;
  double t;
  double a = 1.0;                           /* fix all ampls for now */
  double* F = (double *) malloc(sizeof(double)*M*Nt);  /* workspace for nll */
  int iran[2];                              /* index range of supp(F) */

  *Jbest = INFINITY;
  for (l=1; l<=K; ++l) {                    /* waveforms (outer: spaced in RAM) */
    for (t=tpad; t<=(double)Nt-1-tpad; t+=1.0/fac) {  /* timeshifts on fine grid */
      double J = nll(W, M, T, K, fac, 1, &l, &t, &a, Nt, Y, F, eta, locflag, srt);
      J = J + nlps[l-1];                    /* -log posterior = -log lik - log prior */
      if (J<*Jbest) {
        *Jbest = J; *lb = l; *tb = t; *ab = a;  /* save the best params */
      }
    }
  }
  /* re-eval Fb best model, and maybe update srt in supp(F)... */
  spikemod(W, M, T, K, fac, 1, lb, tb, ab, Nt, Fb, 0, iran);
  if (locflag) {
    for (i=iran[0]; i<iran[1]; ++i) {       /* l_2^2 norms over supp(F) */
      double srti = 0.0;
      for (m=0; m<M; ++m) {                 /* channels */
        double x = Y[i*M+m]-Fb[i*M+m];
        srti += x*x;
      }
      srt[i] = srti;                        /* refresh cached residual */
    }
  }
  free(F);
}

/*----------------------------------------------------------------------------
  FITGREEDY - greedy fitting of multiple spikes in one signal window.
  Inputs:
   W       - (M*T*K double) waveforms 3d array stored contiguously.
   M,T,K   - number of channels, waveform time points (upsampled), spike types.
   fac     - (int) upsampling ratio.
   Nt      - (int) number of time points in signal.
   Y       - (M*Nt double) signal array stored contiguously.
   eta     - (double) noise sqrt(variance), only aspect of noise model used.
   tpad    - (double) padding in time points to stop spike hitting the end.
   maxNs   - (int) max # spikes to add into model.
   nlps    - (double size K) -log prior probabilities for each spike type 1..K.
   locflag - 0: compute NLL the usual way; 1: use local update.
  Outputs:
   Ns (int) - number of spikes in best fit.
   lb (int), tb (double), ab (double) - multi-spike best model param arrays
        (each must be allocated up to size maxNs). Padded with 0 or NANs.
   J (double, maxNs+1) - best-fit obj func after each spike = neg log lik.
        J[0...Ns] contains greedy fitting history. If Ns<maxNs, J[Ns+1]
        contains the best J for the (Ns+1)th spike that was not added.
   R (M*Nt double) - residual at best-fit params, stored contiguously.
  Algorithm: multiple calls to fitonesp.
  See also: ../fitgreedyspikes.m which is similar but outputs signal not resid.
  Barnett 2/16/15. Fixed bug that if *Ns=0, was wrong 2/17/15.
  log priors 3/12/15 */
void fitgreedy(double* W, int M, int T, int K, int fac, int* Ns, int* lb, double* tb, double* ab, int Nt, double* Y, double eta, double tpad, int maxNs, double* J, double* R, double* nlps, int locflag)
{
  int i, s;
  double *srt = NULL;                       /* per-time-pt resid cache; only alloc'd if locflag */
  double* Fb1 = (double *) malloc(sizeof(double)*M*Nt);  /* one-spike model sig */

  if (locflag) {
    srt = (double *) malloc(sizeof(double)*Nt);  /* nll per time-pt */
    /* here lb, tb, ab, Fb1 are dummies since it's a 0-spike model;
       locflag=2 makes nll initialize srt... */
    J[0] = nll(W, M, T, K, fac, 0, lb, tb, ab, Nt, Y, Fb1, eta, 2, srt);
  } else
    J[0] = nll(W, M, T, K, fac, 0, lb, tb, ab, Nt, Y, Fb1, eta, locflag, srt);
  for (i=0; i<M*Nt; ++i) R[i] = Y[i];       /* init residual R (the fitted thing) */
  *Ns = 0;                                  /* init output # spikes */
  for (s=0; s<maxNs; ++s) {                 /* loop over adding spikes... */
    /* writes best params into correct place in lb,tb,ab, and J arrays... */
    fitonesp(W, M, T, K, fac, lb+s, tb+s, ab+s, Nt, R, eta, tpad, Fb1, J+s+1, nlps, locflag, srt);
    if (J[s+1]<J[s]) {                      /* accept the new spike */
      *Ns = s+1;                            /* update number of spikes */
      for (i=0; i<M*Nt; ++i) R[i] -= Fb1[i];  /* subtract model from resid */
    } else {                                /* clean up and stop */
      lb[s] = 0; tb[s] = NAN; ab[s] = NAN;  /* leave the not-accepted J */
      break;
    }
  }
  free(srt);                                /* was leaked before; free(NULL) is a no-op */
  free(Fb1);
}

/*----------------------------------------------------------------------------
  MULTIFITGREEDY - greedy fitting many spikes in many clip windows, OpenMP.
  Inputs:
   W       - (M*T*K) waveforms 3d array stored contiguously.
   M,T,K   - number of channels, waveform time points (upsampled), spike types.
   fac     - (integer) upsampling ratio.
   Tc (int size Nc) - number of time points in each signal clip.
   Nc      - number of signal clips.
   Y       - (M*sum(Tc)) signal array stored contiguously.
   eta     - noise sqrt(variance), only aspect of noise model used for now.
   tpad    - (double) padding in time points to stop spike hitting the end.
   maxNs   - max # spikes to add into model for fitting each clip.
   wantR   - 1 to write to residual array (size of Y); 0 not to.
   nlps    - (double size K) -log prior probabilities for each spike type 1..K.
   locflag - 0: usual NLL; 1: local update; 2 (default): choose per clip
             based on which is fastest (currently locflag = (Nt > 100)).
  Outputs:
   Ns (int size Nc) - number of spikes in best fit model for each clip.
   lb (maxNs*Nc int), tb (maxNs*Nc double), ab (maxNs*Nc double) - multi-spike
        best model param arrays (only first Ns[c] valid in col c). maxNs is
        the fast (row) variable.
   J (double, (maxNs+1)*Nc) - history of greedy best-fit objective func
        (= neg log lik) for each clip. maxNs+1 is the fast (row) axis.
   R (double, M*sum(Tc)) - optional residual array for all clips contiguously.
  Algorithm: multiple calls to fitonesp, distributed across cores. Local
  caching of each clip's data keeps hot loops off the big distant arrays.
  todo: test differing Tc's.
  See also: test_multifitgreedy.m, ../fitgreedyspikes.m (on which based).
  Barnett 2/17/15. neg log priors 3/12/15 */
void multifitgreedy(double* W, int M, int T, int K, int fac, int* Ns, int* lb, double* tb, double* ab, int *Tc, int Nc, double* Y, double eta, double tpad, int maxNs, int wantR, double* J, double* R, double* nlps, int locflag)
{
  int Nt, ns, i, c, o, *l;                  /* c will loop over clips */
  double *t, *a, *Jloc, *Yloc, *Rloc;

  /* set up prefix-sum p (since omp not sequential) = index along time axis */
  int* p = (int*)malloc(Nc*sizeof(int));
  p[0] = 0;
  for (c=0;c<Nc-1;++c) p[c+1] = p[c] + Tc[c];
  int maxNt = 0;
  for (c=0; c<Nc; ++c) if (Tc[c]>maxNt) maxNt=Tc[c];  /* max over Tc */

  /* note shared is default. The idea of splitting the parallel and for comes
     from: http://stackoverflow.com/questions/2352895/how-to-ensure-a-dynamically-allocated-array-is-private-in-openmp */
#pragma omp parallel private(c,Yloc,Rloc,Jloc,l,t,a,i,ns,Nt,o)
  {
    /* alloc once per thread (saves re-mallocing inside loop)... */
    Yloc = (double*) malloc(M*maxNt*sizeof(double));
    Rloc = (double*) malloc(M*maxNt*sizeof(double));
    Jloc = (double*) malloc((maxNs+1)*sizeof(double));
    l = (int*) malloc(maxNs*sizeof(int));
    t = (double*) malloc(maxNs*sizeof(double));
    a = (double*) malloc(maxNs*sizeof(double));
#pragma omp for schedule(dynamic)
    /* dynamic important: effort varies as O(Nt^2) */
    for (c=0; c<Nc; ++c) {
      Nt = Tc[c];                           /* Nt for this clip */
      for (i=0; i<maxNs; ++i) {             /* reset local arrays w/ NAN */
        l[i] = 0; t[i] = NAN; a[i] = NAN;   /* there's no int NAN; use 0 */
      }
      for (i=0; i<maxNs+1; ++i) Jloc[i] = NAN;
      for (i=0; i<M*Nt; ++i) Yloc[i] = Y[i+p[c]*M];  /* get from distant Y */
      /* local Y copy made since fitgreedy accesses Yloc a lot... */
      int locflagc = locflag;
      if (locflag==2) locflagc = (Nt>100);  /* choose on a per-clip basis */
      fitgreedy(W, M, T, K, fac, &ns, l,t,a, Nt, Yloc, eta, tpad, maxNs, Jloc, Rloc, nlps, locflagc);
      /* now uncaching: copy local arrays into the big distant arrays... */
      Ns[c] = ns;
      o = c*maxNs;                          /* offset in the maxNs*Nc arrays */
      for (i=0; i<maxNs; ++i) {
        lb[i+o] = l[i]; tb[i+o] = t[i]; ab[i+o] = a[i];
      }
      o = c*(maxNs+1);                      /* J has 1 more row! */
      for (i=0; i<maxNs+1; ++i) J[i+o] = Jloc[i];  /* copy J history into col */
      if (wantR)                            /* copy local resid into big output */
        for (i=0; i<M*Nt; ++i) R[i+p[c]*M] = Rloc[i];
    }
    free(Yloc);                             /* free per-thread workspaces */
    free(Rloc); free(Jloc);
    free(l); free(a); free(t);
  } /* end omp parallel */
  free(p);
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/

/* DoublePixelPacket: a pixel's channels held as doubles for accumulation. */
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;

/* NodeInfo: one node of the color description tree. Each node represents a
   cube in RGB(A) space; child[16] allows up to 4 bits per level (R,G,B and,
   when alpha is associated, A). */
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];

  MagickSizeType
    number_unique;      /* pixels classified exactly at this node (n2) */

  DoublePixelPacket
    total_color;        /* channel sums for pixels classified here */

  double
    quantize_error;     /* accumulated squared distance to node center (E) */

  size_t
    color_number,       /* index of this node's color in the colormap */
    id,
    level;
} NodeInfo;

/* Nodes: a pool (linked list of arrays) from which NodeInfo's are drawn. */
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

/* CubeInfo: all state for one quantization run — tree root, color counts,
   thresholds, per-thread scan state, and the error-diffusion queue. */
typedef struct _CubeInfo
{
  NodeInfo
    *root;

  size_t
    colors,
    maximum_colors;

  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  DoublePixelPacket
    target;

  double
    distance,
    pruning_threshold,
    next_threshold;

  size_t
    nodes,
    free_nodes,
    color_number;

  NodeInfo
    *next_node;

  Nodes
    *node_queue;

  MemoryInfo
    *memory_info;

  ssize_t
    *cache;

  DoublePixelPacket
    error[ErrorQueueLength];

  double
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;

  ssize_t
    x,
    y;

  size_t
    depth;

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *,ExceptionInfo *),
  SetGrayscaleImage(Image *,ExceptionInfo *),
  SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e Q u a n t i z e I n f o                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.  When a
%  non-NULL image_info is supplied, the dither method is taken from its
%  dither flag (overridable by the "dither" image option) and measure_error
%  is taken from its verbose flag.
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info != (ImageInfo *) NULL)
    {
      const char
        *option;

      quantize_info->dither_method=image_info->dither == MagickFalse ?
        NoDitherMethod : RiemersmaDitherMethod;
      /* the "dither" image option, if set, overrides the boolean flag */
      option=GetImageOption(image_info,"dither");
      if (option != (const char *) NULL)
        quantize_info->dither_method=(DitherMethod) ParseCommandOption(
          MagickDitherOptions,MagickFalse,option);
      quantize_info->measure_error=image_info->verbose;
    }
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) a color map (an array of RGB
%  triples for each color present in the output image) and (2) a pixel array
%  representing each pixel as an index into the color map.  The assignment
%  phase first establishes the color map from the pruned color description
%  tree, then reclassifies each pixel to the deepest node containing its
%  color; the node's mean-color index becomes the pixel's value.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/* Copy a Quantum pixel into a DoublePixelPacket, premultiplying RGB by
   alpha when the cube associates alpha and the pixel is not fully opaque. */
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (GetPixelAlpha(image,pixel) == OpaqueAlpha))
    {
      /* opaque (or alpha not associated): copy channels straight through */
      alpha_pixel->red=(double) GetPixelRed(image,pixel);
      alpha_pixel->green=(double) GetPixelGreen(image,pixel);
      alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=alpha*GetPixelRed(image,pixel);
  alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}

/* Same conversion as AssociateAlphaPixel, but from a PixelInfo source. */
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->alpha == OpaqueAlpha))
    {
      alpha_pixel->red=(double) pixel->red;
      alpha_pixel->green=(double) pixel->green;
      alpha_pixel->blue=(double) pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=alpha*pixel->red;
  alpha_pixel->green=alpha*pixel->green;
  alpha_pixel->blue=alpha*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}

/* Map a pixel to the child id (0..7, or 0..15 with alpha) at tree level
   `index` by taking bit `index` of each scaled channel value. */
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
    ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
    ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
  return(id);
}

static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag  "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  if (SetImageColormap(image,cube_info,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* each row works on a private copy of the cube's scan state */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          const NodeInfo
            *node_info;

          ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.  First
            extend the run of pixels identical to the current one, so the
            tree walk is done once per run.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: force the two-entry colormap to pure black/white,
        ordered by luma.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ?
        0.0 : QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree of
%  sufficient depth to represent each possible input color in a leaf.  A
%  fully-formed tree is impractical for realistic values of Cmax, so nodes
%  are allocated only as they are needed and the maximum tree depth is
%  chosen from the desired output colormap size (currently
%  log2(colormap size)).
% % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. % */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->alpha_trait == BlendPixelTrait ? 
MagickTrue : MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; double bisect; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. */ SetAssociatedAlpha(image,cube_info); if (cube_info->quantize_info->colorspace != image->colorspace) { if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace,exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace, exception); } midpoint.red=(double) QuantumRange/2.0; midpoint.green=(double) QuantumRange/2.0; midpoint.blue=(double) QuantumRange/2.0; midpoint.alpha=(double) QuantumRange/2.0; error.alpha=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. 
*/ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
*/ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. 
*/ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if (cube_info->quantize_info->colorspace != image->colorspace) if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. % */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  /* 16 children when alpha participates in the node id, otherwise 8 */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        pixel;

      double
        alpha,
        beta,
        distance;

      DoublePixelPacket
        *magick_restrict q;

      PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      /* alpha/beta premultiply each color by its normalized opacity when
         alpha is associated, so transparent colors compare as similar */
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(double) (QuantumScale*p->alpha);
          beta=(double) (QuantumScale*q->alpha);
        }
      /* accumulate squared distance channel by channel, bailing out as soon
         as the running total exceeds the best distance found so far */
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /* re-quantize with the current color count and maximum tree depth to
     collapse duplicate/unused entries */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the DefineImageColormap method is:
%
%      void DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      /* alpha here is the reciprocal of the pixel count (for averaging) */
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /* un-premultiply: divide the mean color by the mean alpha */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /* track the most-populated transparent entry */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *nodes;

  /*
    Release color cube tree storage.
  */
  /* walk the node-arena list, freeing each arena and its node array */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /* invalidate the signature so any later use of this pointer is caught */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
% */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2* sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; const char *artifact; double amount; DoublePixelPacket 
**pixels; MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; amount=1.0; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) amount=StringToDoubleInterval(artifact,1.0); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; Quantum *magick_restrict q; ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? 
(ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*amount*current[u-v].red/16; pixel.green+=7.0*amount*current[u-v].green/16; pixel.blue+=7.0*amount*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*amount*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=previous[u+v].alpha/16; } pixel.red+=5.0*amount*previous[u].red/16; pixel.green+=5.0*amount*previous[u].green/16; pixel.blue+=5.0*amount*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*amount*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*amount*previous[u-v].red/16; pixel.green+=3.0*amount*previous[u-v].green/16; pixel.blue+=3.0*amount*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*amount*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { NodeInfo *node_info; size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int, ExceptionInfo *); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction,ExceptionInfo *exception) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) 
RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); 
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}

/*
  RiemersmaDither() dithers the single pixel at the cube's current cursor
  (cube_info->x, cube_info->y): it adds the weighted error queue to the
  pixel, maps the result to the closest colormap entry (memoized in the
  color cache), writes the entry back, appends the new quantization error
  to the tail of the error queue, and finally advances the cursor one step
  in `direction' (ForgetGravity leaves the cursor in place).
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  /*
    The Hilbert walk wanders outside the image near the borders; only pixels
    inside the canvas are dithered, but the cursor still advances below.
  */
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      Quantum
        *magick_restrict q;

      ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /*
        Memoize closest-color lookups: a negative cache entry means this
        quantized color has not been resolved yet.
      */
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /*
    Advance the cursor; ForgetGravity (and any unknown direction) is a no-op.
  */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

/*
  DitherImage() dispatches to the configured dither method: Riemersma
  (Hilbert-curve error diffusion) is handled here, anything else falls
  through to Floyd-Steinberg.
*/
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Choose the smallest Hilbert-curve order whose 2^depth side covers the
    larger image dimension.
  */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /*
    ForgetGravity dithers the final pixel without moving the cursor.
  */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t C u b e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initialize the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or
%      one tells Quantize to choose a optimal tree depth of
%      Log4(number_colors).  A tree of this depth generally allows the best
%      representation of the reference image with the least amount of memory
%      and the fastest computational speed.  In some cases, such as an image
%      with low color dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    sum,
    weight;

  ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /*
    Clamp the requested tree depth to [2, MaxTreeDepth].
  */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache: all bytes 0xFF yields -1 (two's complement) in
    every ssize_t slot, marking each entry "not yet resolved".
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors; fold any floating-point residue into
    the first weight so the factors sum to exactly 1.0.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
% % The format of the GetNodeInfo method is: % % NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, % const size_t level,NodeInfo *parent) % % A description of each parameter follows. % % o node: The GetNodeInfo method returns a pointer to a queue of nodes. % % o id: Specifies the child number of the node. % % o level: Specifies the level in the storage_class the node resides. % */ static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, const size_t level,NodeInfo *parent) { NodeInfo *node_info; if (cube_info->free_nodes == 0) { Nodes *nodes; /* Allocate a new queue of nodes. */ nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes)); if (nodes == (Nodes *) NULL) return((NodeInfo *) NULL); nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList, sizeof(*nodes->nodes)); if (nodes->nodes == (NodeInfo *) NULL) return((NodeInfo *) NULL); nodes->next=cube_info->node_queue; cube_info->node_queue=nodes; cube_info->next_node=nodes->nodes; cube_info->free_nodes=NodesInAList; } cube_info->nodes++; cube_info->free_nodes--; node_info=cube_info->next_node++; (void) memset(node_info,0,sizeof(*node_info)); node_info->parent=parent; node_info->id=id; node_info->level=level; return(node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t i z e E r r o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantizeError() measures the difference between the original % and quantized images. This difference is the total quantization error. % The error is computed by summing over all pixels in an image the distance % squared in RGB space between each reference pixel value and its quantized % value. These values are computed: % % o mean_error_per_pixel: This value is the mean error for any single % pixel in the image. 
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    Only PseudoClass images carry a colormap to compare against; a
    DirectClass image has zero quantization error by definition here.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      /*
        Alpha-weight both sides of the comparison when the image blends.
      */
      if (image->alpha_trait == BlendPixelTrait)
        {
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    `area' counts three channel samples per pixel, matching the three
    per-channel accumulations above.
  */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
% */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) memset(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K m e a n s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KmeansImage() applies k-means color reduction to an image. This is a % colorspace clustering or segmentation technique. % % The format of the KmeansImage method is: % % MagickBooleanType KmeansImage(Image *image,const size_t number_colors, % const size_t max_iterations,const double tolerance, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_colors: number of colors to use as seeds. % % o max_iterations: maximum number of iterations while converging. % % o tolerance: the maximum tolerance. % % o exception: return any errors or warnings in this structure. 
% */ typedef struct _KmeansInfo { double red, green, blue, alpha, black, count, distortion; } KmeansInfo; static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info) { ssize_t i; assert(kmeans_info != (KmeansInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (kmeans_info[i] != (KmeansInfo *) NULL) kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]); kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info); return(kmeans_info); } static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors) { KmeansInfo **kmeans_info; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads, sizeof(*kmeans_info)); if (kmeans_info == (KmeansInfo **) NULL) return((KmeansInfo **) NULL); (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info)); for (i=0; i < (ssize_t) number_threads; i++) { kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors, sizeof(**kmeans_info)); if (kmeans_info[i] == (KmeansInfo *) NULL) return(DestroyKmeansThreadSet(kmeans_info)); } return(kmeans_info); } static inline double KmeansMetric(const Image *magick_restrict image, const Quantum *magick_restrict p,const PixelInfo *magick_restrict q) { double gamma, metric, pixel; gamma=1.0; metric=0.0; if ((image->alpha_trait != UndefinedPixelTrait) || (q->alpha_trait != UndefinedPixelTrait)) { pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ? 
        q->alpha : OpaqueAlpha);
      metric+=pixel*pixel;
      /*
        gamma discounts color-channel differences by transparency.
      */
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  /*
    For hue-like channels, measure the angular (wrap-around) distance.
  */
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}

MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag  "Kmeans/Image"
#define RandomColorComponent(info)  (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        colors,
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      colors=number_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        const char
          *q;

        /*
          Each seed color is terminated by ';' or end-of-string.
        */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            /*
              NOTE(review): alpha/black are randomized only when alpha_trait
              != BlendPixelTrait -- this guard looks inverted but is used
              consistently at all three alpha sites in this function; verify
              against upstream before changing.
            */
            if (image->alpha_trait != BlendPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    ssize_t
      i;

    ssize_t
      y;

    /*
      Reset every thread's per-cluster accumulators for this iteration.
    */
    for (i=0; i < (ssize_t) number_threads; i++)
      (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic) shared(status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        ssize_t
          i;

        ssize_t
          j;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        j=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          /*
            Early exit: a distance at or below epsilon cannot be beaten.
          */
          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              j=i;
            }
        }
        kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q);
        /* NOTE(review): see seeding loop -- same possibly-inverted guard. */
        if (image->alpha_trait != BlendPixelTrait)
          kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][j].count++;
        kmeans_pixels[id][j].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) j,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (i=1; i < (ssize_t) number_threads; i++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) image->colors; j++)
      {
        kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red;
        kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green;
        kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue;
        if (image->alpha_trait != BlendPixelTrait)
          kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black;
        kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count;
        kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        gamma;

      /*
        PerceptibleReciprocal() guards against empty clusters (count == 0).
      */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count);
      image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red;
      image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green;
      image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue;
      if (image->alpha_trait != BlendPixelTrait)
        image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black;
      distortion+=kmeans_pixels[0][i].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /*
      Converged when the distortion change falls within tolerance.
    */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  return(SyncImage(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P o s t e r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const DitherMethod dither_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
%      RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag  "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Only channels flagged for update are snapped to the level grid.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Quantize to at most levels^3 colors with the requested dithering.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
*/ parent=node_info->parent; parent->number_unique+=node_info->number_unique; parent->total_color.red+=node_info->total_color.red; parent->total_color.green+=node_info->total_color.green; parent->total_color.blue+=node_info->total_color.blue; parent->total_color.alpha+=node_info->total_color.alpha; parent->child[node_info->id]=(NodeInfo *) NULL; cube_info->nodes--; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e L e v e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneLevel() deletes all nodes at the bottom level of the color tree merging % their color statistics into their parent node. % % The format of the PruneLevel method is: % % PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) { ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. 
% % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) { ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    Fast path: an opaque grayscale image quantizes directly to a gray ramp.
  */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
*/ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. 
*/ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 
    8UL : 16UL;
  for (i=0; i < (ssize_t) number_children ; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
        quantize_error);
  return(n);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Reduce() traverses the color cube tree and prunes any node whose
%  quantization error falls below a particular threshold.
%
%  The format of the Reduce method is:
%
%      Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (depth-first, so subtrees are pruned before their
    parents are considered).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[i]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Node survives this pass: count it if it defines a color, and track
        the smallest surviving quantize_error as the next pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.
It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. 
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* classify the REFERENCE image's colors; they become the target palette */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign each pixel of the target image the closest reference color.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
% % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. 
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  IntensityCompare(): qsort() comparator ordering PixelInfo entries by
  intensity.  The difference is clamped to [INT_MIN,INT_MAX] before the
  (int) cast.  Note the cast truncates: two colors whose intensities differ
  by less than 1.0 compare as equal here.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < (double) INT_MIN)
    intensity=(double) INT_MIN;
  if (intensity > (double) INT_MAX)
    intensity=(double) INT_MAX;
  return((int) intensity);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  ssize_t
    i;

  size_t
    extent;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /* one slot per possible quantum-map value; +1 guards the colors boundary */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } (void) memset(colormap_index,0,extent*sizeof(*colormap_index)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) 
AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColormap() traverses the color cube tree and sets the colormap of % the image. 
A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the SetImageColormap method is:
%
%      MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    number_colors;

  /*
    Allocate a colormap large enough for the worst case; it is trimmed
    below once DefineImageColormap() reports how many colors it defined.
  */
  number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors != number_colors)
    {
      /*
        Shrink the colormap to the number of colors actually defined.
      */
      image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
        image->colors+1,sizeof(*image->colormap));
      if (image->colormap == (PixelInfo *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  return(MagickTrue);
}
/* ==================== LAGraph_ConnectedComponents.c ==================== */
//------------------------------------------------------------------------------ // LAGraph_ConnectedComponents: connected components //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause //------------------------------------------------------------------------------ // Code is based on the algorithm described in the following paper // Zhang, Azad, Hu. FastSV: FastSV: A Distributed-Memory Connected Component // Algorithm with Fast Convergence (SIAM PP20) // A subsequent update to the algorithm is here (which might not be reflected // in this code): // // Yongzhe Zhang, Ariful Azad, Aydin Buluc: Parallel algorithms for finding // connected components using linear algebra. J. Parallel Distributed Comput. // 144: 14-27 (2020). // Modified by Tim Davis, Texas A&M University // The input matrix A must be symmetric. Self-edges (diagonal entries) are // OK, and are ignored. The values and type of A are ignored; just its // pattern is accessed. // The matrix A must have dimension 2^32 or less. // TODO: Need a 64-bit version of this method. // TODO: this function is not thread-safe, since it exports G->A and then // reimports it back. G->A is unchanged when the function returns, but during // execution G->A is invalid. #include "LG_internal.h" #define LAGRAPH_FREE_ALL //------------------------------------------------------------------------------ // hash functions: TODO describe me //------------------------------------------------------------------------------ // hash table size must be a power of 2 #define HASH_SIZE 1024 // number of samples to insert into the hash table // TODO: this seems to be a lot of entries for a HASH_SIZE of 1024. // There could be lots of collisions. 
#define HASH_SAMPLES 864 #define HASH(x) (((x << 4) + x) & (HASH_SIZE-1)) #define NEXT(x) ((x + 23) & (HASH_SIZE-1)) //------------------------------------------------------------------------------ // ht_init: TODO describe me //------------------------------------------------------------------------------ // Clear the hash table counts (ht_val [0:HASH_SIZE-1] = 0), and set all hash // table entries as empty (ht_key [0:HASH_SIZE-1] =-1). // TODO: the memset of ht_key is confusing // TODO: the name "ht_val" is confusing. It is not a value, but a count of // the number of times the value x = ht_key [h] has been inserted into the // hth position in the hash table. It should be renamed ht_cnt. static inline void ht_init ( int32_t *ht_key, int32_t *ht_val ) { memset (ht_key, -1, sizeof (int32_t) * HASH_SIZE) ; memset (ht_val, 0, sizeof (int32_t) * HASH_SIZE) ; } //------------------------------------------------------------------------------ // ht_sample: TODO describe me //------------------------------------------------------------------------------ // static inline void ht_sample ( uint32_t *V32, // array of size n (TODO: this is a bad variable name) int32_t n, int32_t samples, // number of samples to take from V32 int32_t *ht_key, int32_t *ht_val, uint64_t *seed ) { for (int32_t k = 0 ; k < samples ; k++) { // select an entry from V32 at random int32_t x = V32 [LAGraph_Random60 (seed) % n] ; // find x in the hash table // TODO: make this loop a static inline function (see also below) int32_t h = HASH (x) ; while (ht_key [h] != -1 && ht_key [h] != x) { h = NEXT (h) ; } ht_key [h] = x ; ht_val [h]++ ; } } //------------------------------------------------------------------------------ // ht_most_frequent: TODO describe me //------------------------------------------------------------------------------ // TODO what if key is returned as -1? Code breaks. 
TODO: handle this case

// Scans the whole table and returns the key with the highest count, or -1 if
// the table is empty (all counts zero) -- callers must tolerate -1 (see TODO).

static inline int32_t ht_most_frequent
(
    int32_t *ht_key,
    int32_t *ht_val
)
{
    int32_t key = -1 ;      // stays -1 if every slot has a zero count
    int32_t val = 0 ;       // max (ht_val [0:HASH_SIZE-1])
    for (int32_t h = 0 ; h < HASH_SIZE ; h++)
    {
        if (ht_val [h] > val)
        {
            key = ht_key [h] ;
            val = ht_val [h] ;
        }
    }
    return (key) ;      // return most frequent key
}

//------------------------------------------------------------------------------
// Reduce_assign32: w (index) += s, using MIN as the "+=" accum operator
//------------------------------------------------------------------------------

// mask = NULL, accumulator = GrB_MIN_UINT32, descriptor = NULL.

// Duplicates are summed with the accumulator, which differs from how
// GrB_assign works.  GrB_assign states that the presence of duplicates results
// in undefined behavior.  GrB_assign in SuiteSparse:GraphBLAS follows the
// MATLAB rule, which discards all but the first of the duplicates.

// TODO: add this to GraphBLAS as a variant of GrB_assign, either as
// GxB_assign_accum (or another name), or as a GxB_* descriptor setting.

static inline int Reduce_assign32
(
    GrB_Vector *w_handle,   // vector of size n, all entries present
    GrB_Vector *s_handle,   // vector of size n, all entries present
    uint32_t *index,        // array of size n, can have duplicates
    GrB_Index n,
    int nthreads,
    int32_t *ht_key,        // hash table
    int32_t *ht_val,        // hash table (count of # of entries)
    uint64_t *seed,         // random
    char *msg
)
{
    GrB_Type w_type, s_type ;
    GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i, w_size, s_size ;
    uint32_t *w_x, *s_x ;
    w_x = NULL ;
    s_x = NULL ;
    w_i = NULL ;
    s_i = NULL ;

    //--------------------------------------------------------------------------
    // export w and s
    //--------------------------------------------------------------------------

    // export the GrB_Vectors w and s as full arrays, to get direct access to
    // their contents.  Note that this would fail if w or s are not full, with
    // all entries present.
#if GxB_IMPLEMENTATION >= GxB_VERSION (5,0,1) bool w_is_uniform, s_is_uniform ; GrB_TRY (GxB_Vector_export_Full (w_handle, &w_type, &w_n, (void **) &w_x, &w_size, &w_is_uniform, NULL)) ; GrB_TRY (GxB_Vector_export_Full (s_handle, &s_type, &s_n, (void **) &s_x, &s_size, &s_is_uniform, NULL)) ; #else GrB_TRY (GxB_Vector_export_Full (w_handle, &w_type, &w_n, (void **) &w_x, &w_size, NULL)) ; GrB_TRY (GxB_Vector_export_Full (s_handle, &s_type, &s_n, (void **) &s_x, &s_size, NULL)) ; #endif if (nthreads >= 4) { // allocate a buf array for each thread, of size HASH_SIZE size_t mem_size = 0 ; uint32_t *mem = LAGraph_Malloc (nthreads*HASH_SIZE, sizeof (uint32_t), &mem_size) ; // TODO: check out-of-memory condition here // TODO why is hashing needed here? hashing is slow for what needs // to be computed here. GraphBLAS has fast MIN atomic monoids that // do not require hashing. ht_init (ht_key, ht_val) ; ht_sample (index, n, HASH_SAMPLES, ht_key, ht_val, seed) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { // get the thread-specific buf array of size HASH_SIZE // TODO: buf is a bad variable name; it's not a "buffer", // but a local workspace to compute the local version of w_x. uint32_t *buf = mem + tid * HASH_SIZE ; // copy the values from the global hash table into buf for (int32_t h = 0 ; h < HASH_SIZE ; h++) { if (ht_key [h] != -1) { buf [h] = w_x [ht_key [h]] ; } } // this thread works on index [kstart:kend] int32_t kstart = (n * tid + nthreads - 1) / nthreads ; int32_t kend = (n * tid + n + nthreads - 1) / nthreads ; for (int32_t k = kstart ; k < kend ; k++) { uint32_t i = index [k] ; // TODO: make this loop a static inline function int32_t h = HASH (i) ; while (ht_key [h] != -1 && ht_key [h] != i) { h = NEXT (h) ; } if (ht_key [h] == -1) { // TODO is this a race condition? 
w_x [i] = LAGraph_MIN (w_x [i], s_x [k]) ; } else { buf [h] = LAGraph_MIN (buf [h], s_x [k]) ; } } } // combine intermediate results from each thread for (int32_t h = 0 ; h < HASH_SIZE ; h++) { int32_t i = ht_key [h] ; if (i != -1) { for (int32_t tid = 0 ; tid < nthreads ; tid++) { w_x [i] = LAGraph_MIN (w_x [i], mem [tid * HASH_SIZE + h]) ; } } } LAGraph_Free ((void **) &mem, mem_size) ; } else { // sequential version for (GrB_Index k = 0 ; k < n ; k++) { uint32_t i = index [k] ; w_x [i] = LAGraph_MIN (w_x [i], s_x [k]) ; } } //-------------------------------------------------------------------------- // reimport w and s back into GrB_Vectors, and return result //-------------------------------------------------------------------------- // s is unchanged. It was exported only to compute w (index) += s #if GxB_IMPLEMENTATION >= GxB_VERSION (5,0,1) GrB_TRY (GxB_Vector_import_Full (w_handle, w_type, w_n, (void **) &w_x, w_size, w_is_uniform, NULL)) ; GrB_TRY (GxB_Vector_import_Full (s_handle, s_type, s_n, (void **) &s_x, s_size, s_is_uniform, NULL)) ; #else GrB_TRY (GxB_Vector_import_Full (w_handle, w_type, w_n, (void **) &w_x, w_size, NULL)) ; GrB_TRY (GxB_Vector_import_Full (s_handle, s_type, s_n, (void **) &s_x, s_size, NULL)) ; #endif return (0) ; } //------------------------------------------------------------------------------ // LAGraph_ConnectedComponents //------------------------------------------------------------------------------ #undef LAGRAPH_FREE_ALL #define LAGRAPH_FREE_ALL \ { \ LAGraph_Free ((void **) &I, I_size) ; \ LAGraph_Free ((void **) &V32, V32_size) ; \ LAGraph_Free ((void **) &ht_key, ht_key_size) ; \ LAGraph_Free ((void **) &ht_val, ht_val_size) ; \ /* TODO why is T not freed?? 
*/ \ GrB_free (&f) ; \ GrB_free (&gp) ; \ GrB_free (&mngp) ; \ GrB_free (&gp_new) ; \ GrB_free (&mod) ; \ } int LAGraph_ConnectedComponents ( // output GrB_Vector *component, // component(i)=k if node is in the kth component // inputs LAGraph_Graph G, // input graph, G->A can change char *msg ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- LG_CLEAR_MSG ; uint32_t *V32 = NULL ; int32_t *ht_key = NULL, *ht_val = NULL ; GrB_Index n, nnz, *I = NULL ; GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ; GrB_Matrix T = NULL ; size_t I_size = 0, V32_size = 0, ht_key_size = 0, ht_val_size = 0 ; LG_CHECK (LAGraph_CheckGraph (G, msg), -1, "graph is invalid") ; LG_CHECK (component == NULL, -1, "component parameter is NULL") ; if (G->kind == LAGRAPH_ADJACENCY_UNDIRECTED || (G->kind == LAGRAPH_ADJACENCY_DIRECTED && G->A_pattern_is_symmetric == LAGRAPH_TRUE)) { // A must be symmetric ; } else { // A must not be unsymmetric LG_CHECK (false, -1, "input must be symmetric") ; } GrB_Matrix S = G->A ; GrB_TRY (GrB_Matrix_nrows (&n, S)) ; GrB_TRY (GrB_Matrix_nvals (&nnz, S)) ; LG_CHECK (n > UINT32_MAX, -1, "problem too large (FIXME)") ; #define FASTSV_SAMPLES 4 bool sampling = (n * FASTSV_SAMPLES * 2 < nnz) ; // random number seed uint64_t seed = n ; //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- // determine # of threads to use for Reduce_assign int nthreads ; LAGraph_TRY (LAGraph_GetNumThreads (&nthreads, NULL)) ; nthreads = LAGraph_MIN (nthreads, n / 16) ; nthreads = LAGraph_MAX (nthreads, 1) ; // # of threads to use for typecast int nthreads2 = n / (64*1024) ; nthreads2 = LAGraph_MIN (nthreads2, nthreads) ; nthreads2 = LAGraph_MAX (nthreads2, 1) ; // vectors GrB_TRY (GrB_Vector_new (&f, GrB_UINT32, n)) ; GrB_TRY 
(GrB_Vector_new (&gp_new, GrB_UINT32, n)) ; GrB_TRY (GrB_Vector_new (&mod, GrB_BOOL, n)) ; // temporary arrays I = LAGraph_Malloc (n, sizeof (GrB_Index), &I_size) ; V32 = LAGraph_Malloc (n, sizeof (uint32_t), &V32_size) ; // TODO: check out-of-memory condition // prepare vectors #pragma omp parallel for num_threads(nthreads2) schedule(static) for (GrB_Index i = 0 ; i < n ; i++) { I [i] = i ; V32 [i] = (uint32_t) i ; } GrB_TRY (GrB_Vector_build (f, I, V32, n, GrB_PLUS_UINT32)) ; GrB_TRY (GrB_Vector_dup (&gp, f)) ; GrB_TRY (GrB_Vector_dup (&mngp, f)) ; // allocate the hash table ht_key = LAGraph_Malloc (HASH_SIZE, sizeof (int32_t), &ht_key_size) ; ht_val = LAGraph_Malloc (HASH_SIZE, sizeof (int32_t), &ht_val_size) ; LG_CHECK (ht_key == NULL || ht_val == NULL, -1, "out of memory") ; //-------------------------------------------------------------------------- // sample phase //-------------------------------------------------------------------------- if (sampling) { //---------------------------------------------------------------------- // export S = G->A in CSR format //---------------------------------------------------------------------- // S is not modified. It is only exported so that its contents can be // read by the parallel loops below. 
GrB_Type type ; GrB_Index nrows, ncols, nvals, typesize ; int64_t nonempty ; GrB_Index *Sp, *Sj ; void *Sx ; bool S_jumbled = false, S_is_uniform ; GrB_Index Sp_size, Sj_size, Sx_size ; GrB_TRY (GrB_Matrix_nvals (&nvals, S)) ; #if GxB_IMPLEMENTATION >= GxB_VERSION (5,0,1) GrB_TRY (GxB_Matrix_export_CSR (&S, &type, &nrows, &ncols, &Sp, &Sj, &Sx, &Sp_size, &Sj_size, &Sx_size, &S_is_uniform, &S_jumbled, NULL)) ; #else GrB_TRY (GxB_Matrix_export_CSR (&S, &type, &nrows, &ncols, &Sp, &Sj, &Sx, &Sp_size, &Sj_size, &Sx_size, &S_jumbled, NULL)) ; #endif GrB_TRY (GxB_Type_size (&typesize, type)) ; G->A = NULL ; //---------------------------------------------------------------------- // allocate space to construct T //---------------------------------------------------------------------- GrB_Index Tp_len = nrows+1, Tp_size = 0 ; GrB_Index Tj_len = nvals, Tj_size = 0 ; GrB_Index Tx_len = nvals, Tx_size = 0 ; GrB_Index *Tp = LAGraph_Malloc (Tp_len, sizeof (GrB_Index), &Tp_size) ; GrB_Index *Tj = LAGraph_Malloc (Tj_len, sizeof (GrB_Index), &Tj_size) ; void *Tx = LAGraph_Malloc (Tx_len, typesize, &Tx_size) ; // TODO check out-of-memory conditions //---------------------------------------------------------------------- // allocate workspace //---------------------------------------------------------------------- size_t range_size = 0, count_size = 0 ; int32_t *range = LAGraph_Malloc (nthreads + 1, sizeof (int32_t), &range_size) ; GrB_Index *count = LAGraph_Malloc (nthreads + 1, sizeof (GrB_Index), &count_size) ; // TODO check out-of-memory conditions memset (count, 0, sizeof (GrB_Index) * (nthreads + 1)) ; //---------------------------------------------------------------------- // define parallel tasks to construct T //---------------------------------------------------------------------- // thread tid works on rows range[tid]:range[tid+1]-1 of S and T for (int tid = 0 ; tid <= nthreads ; tid++) { range [tid] = (n * tid + nthreads - 1) / nthreads ; } 
//---------------------------------------------------------------------- // determine the number entries to be constructed in T for each thread //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { for (int32_t i = range [tid] ; i < range [tid+1] ; i++) { int32_t deg = Sp [i + 1] - Sp [i] ; count [tid + 1] += LAGraph_MIN (FASTSV_SAMPLES, deg) ; } } //---------------------------------------------------------------------- // count = cumsum (count) //---------------------------------------------------------------------- for (int tid = 0 ; tid < nthreads ; tid++) { count [tid + 1] += count [tid] ; } //---------------------------------------------------------------------- // construct T //---------------------------------------------------------------------- // T (i,:) consists of the first FASTSV_SAMPLES of S (i,:). // TODO: this could be done by GxB_Select, using a new operator. Need // to define a set of GxB_SelectOp operators that would allow for this. // Note that Tx is not modified. Only Tp and Tj are constructed. #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { GrB_Index p = count [tid] ; Tp [range [tid]] = p ; for (int32_t i = range [tid] ; i < range [tid+1] ; i++) { // construct T (i,:) from the first entries in S (i,:) for (int32_t j = 0 ; j < FASTSV_SAMPLES && Sp [i] + j < Sp [i + 1] ; j++) { Tj [p++] = Sj [Sp [i] + j] ; } Tp [i + 1] = p ; } } //---------------------------------------------------------------------- // import the result into the GrB_Matrix T //---------------------------------------------------------------------- // Note that Tx is unmodified and contains uninitialized values. // TODO: T should held as a uniform-valued matrix, once GraphBLAS // allows for this. 
#if GxB_IMPLEMENTATION >= GxB_VERSION (5,0,1) // in SuiteSparse:GraphBLAS v5, sizes are in bytes, not entries GrB_Index Tp_siz = Tp_size ; GrB_Index Tj_siz = Tj_size ; GrB_Index Tx_siz = Tx_size ; GrB_Index t_nvals = Tp [nrows] ; GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols, &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz, false, S_jumbled, NULL)) ; #elif GxB_IMPLEMENTATION >= GxB_VERSION (5,0,0) // in SuiteSparse:GraphBLAS v5, sizes are in bytes, not entries GrB_Index Tp_siz = Tp_size ; GrB_Index Tj_siz = Tj_size ; GrB_Index Tx_siz = Tx_size ; GrB_Index t_nvals = Tp [nrows] ; GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols, &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz, S_jumbled, NULL)) ; #else // in SuiteSparse:GraphBLAS v4, sizes are in # of entries, not bytes GrB_Index Tp_siz = Tp_len ; GrB_Index Tj_siz = Tj_len ; GrB_Index Tx_siz = Tx_len ; GrB_Index t_nvals = Tp [nrows] ; GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols, &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz, S_jumbled, NULL)) ; #endif //---------------------------------------------------------------------- // find the connected components of T //---------------------------------------------------------------------- // TODO: this is nearly identical to the final phase below. 
// Make this a function bool change = true, is_first = true ; while (change) { // hooking & shortcutting GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT32, GxB_MIN_SECOND_UINT32, T, gp, NULL)) ; if (!is_first) { LAGraph_TRY (Reduce_assign32 (&f, &mngp, V32, n, nthreads, ht_key, ht_val, &seed, msg)) ; } GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32, mngp, gp, NULL)) ; // calculate grandparent GrB_TRY (GrB_Vector_extractTuples (NULL, V32, &n, f)) ; #pragma omp parallel for num_threads(nthreads2) schedule(static) for (uint32_t i = 0 ; i < n ; i++) { I [i] = (GrB_Index) V32 [i] ; } GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, I, n, NULL)) ; // TODO: GrB_Vector_extract should have a variant where the index // list is not given by an array I, but as a GrB_Vector of type // GrB_UINT64 (or which can be typecast to GrB_UINT64). This is a // common issue that arises in other algorithms as well. // Likewise GrB_Matrix_extract, and all forms of GrB_assign. // check termination GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp, NULL)) ; GrB_TRY (GrB_reduce (&change, NULL, GxB_LOR_BOOL_MONOID, mod, NULL)) ; // swap gp and gp_new GrB_Vector t = gp ; gp = gp_new ; gp_new = t ; is_first = false ; } //---------------------------------------------------------------------- // TODO: describe me //---------------------------------------------------------------------- ht_init (ht_key, ht_val) ; ht_sample (V32, n, HASH_SAMPLES, ht_key, ht_val, &seed) ; int32_t key = ht_most_frequent (ht_key, ht_val) ; // TODO: what if key is returned as -1? Then T below is invalid. 
int64_t t_nonempty = -1 ; bool T_jumbled = false, T_is_uniform ; // export T #if GxB_IMPLEMENTATION >= GxB_VERSION (5,0,1) GrB_TRY (GxB_Matrix_export_CSR (&T, &type, &nrows, &ncols, &Tp, &Tj, &Tx, &Tp_siz, &Tj_siz, &Tx_siz, &T_is_uniform, &T_jumbled, NULL)) ; #else GrB_TRY (GxB_Matrix_export_CSR (&T, &type, &nrows, &ncols, &Tp, &Tj, &Tx, &Tp_siz, &Tj_siz, &Tx_siz, &T_jumbled, NULL)) ; #endif // TODO what is this phase doing? It is constructing a matrix T that // depends only on S, key, and V32. T contains a subset of the entries // in S, except that T (i,:) is empty if // The prior content of T is ignored; it is exported from the earlier // phase, only to reuse the allocated space for T. However, T_jumbled // is preserved from the prior matrix T, which doesn't make sense. // This parallel loop is badly load balanced. Each thread operates on // the same number of rows of S, regardless of how many entries appear // in each set of rows. It uses one thread per task, statically // scheduled. #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { GrB_Index ptr = Sp [range [tid]] ; // thread tid scans S (range [tid]:range [tid+1]-1,:), // and constructs T(i,:) for all rows in this range. for (int32_t i = range [tid] ; i < range [tid+1] ; i++) { int32_t pv = V32 [i] ; // what is pv? Tp [i] = ptr ; // start the construction of T(i,:) // T(i,:) is empty if pv == key if (pv != key) { // scan S(i,:) for (GrB_Index p = Sp [i] ; p < Sp [i+1] ; p++) { // get S(i,j) int32_t j = Sj [p] ; if (V32 [j] != key) { // add the entry T(i,j) to T, but skip it if // V32 [j] is equal to key Tj [ptr++] = j ; } } // add the entry T(i,key) if there is room for it in T(i,:) if (ptr - Tp [i] < Sp [i+1] - Sp [i]) { Tj [ptr++] = key ; } } } // count the number of entries inserted into T by this thread? count [tid] = ptr - Tp [range [tid]] ; } // Compact empty space out of Tj not filled in from the above phase. 
// This is a lot of work and should be done in parallel. GrB_Index offset = 0 ; for (int tid = 0 ; tid < nthreads ; tid++) { memcpy (Tj + offset, Tj + Tp [range [tid]], sizeof (GrB_Index) * count [tid]) ; offset += count [tid] ; count [tid] = offset - count [tid] ; } // Compact empty space out of Tp #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { GrB_Index ptr = Tp [range [tid]] ; for (int32_t i = range [tid] ; i < range [tid+1] ; i++) { Tp [i] -= ptr - count [tid] ; } } // finalize T Tp [n] = offset ; // free workspace LAGraph_Free ((void **) &count, count_size) ; LAGraph_Free ((void **) &range, range_size) ; // import S (unchanged since last export) #if GxB_IMPLEMENTATION >= GxB_VERSION (5,0,1) GrB_TRY (GxB_Matrix_import_CSR (&S, type, nrows, ncols, &Sp, &Sj, &Sx, Sp_size, Sj_size, Sx_size, S_is_uniform, S_jumbled, NULL)) ; #else GrB_TRY (GxB_Matrix_import_CSR (&S, type, nrows, ncols, &Sp, &Sj, &Sx, Sp_size, Sj_size, Sx_size, S_jumbled, NULL)) ; #endif // import T for the final phase #if GxB_IMPLEMENTATION >= GxB_VERSION (5,0,1) GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols, &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz, T_is_uniform, T_jumbled, NULL)) ; #else GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols, &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz, T_jumbled, NULL)) ; #endif // restore G->A G->A = S ; } else { // no sampling; the final phase operates on the whole graph T = S ; } //-------------------------------------------------------------------------- // final phase //-------------------------------------------------------------------------- GrB_TRY (GrB_Matrix_nvals (&nnz, T)) ; bool change = true ; while (change && nnz > 0) { // hooking & shortcutting GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT32, GxB_MIN_SECOND_UINT32, T, gp, NULL)) ; GrB_TRY (Reduce_assign32 (&f, &mngp, V32, n, nthreads, ht_key, ht_val, &seed, msg)) ; GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32, mngp, gp, 
NULL)) ; // calculate grandparent GrB_TRY (GrB_Vector_extractTuples (NULL, V32, &n, f)) ; #pragma omp parallel for num_threads(nthreads2) schedule(static) for (uint32_t k = 0 ; k < n ; k++) { I [k] = (GrB_Index) V32 [k] ; } GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, I, n, NULL)) ; // check termination GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp, NULL)) ; GrB_TRY (GrB_reduce (&change, NULL, GxB_LOR_BOOL_MONOID, mod, NULL)) ; // swap gp and gp_new GrB_Vector t = gp ; gp = gp_new ; gp_new = t ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- (*component) = f ; f = NULL ; if (sampling) { GrB_free (&T) ; } LAGRAPH_FREE_ALL ; return (0) ; }
ompt.h
/*
 * include/45/ompt.h.var
 *
 * OMPT (OpenMP Tools) interface for OpenMP 4.5, extended with custom
 * "ext_" callbacks (task-create-begin, loop, chunk) used to generate
 * grain graphs.
 */
#ifndef __OMPT__
#define __OMPT__

/*****************************************************************************
 * system include files
 *****************************************************************************/

#include <stdint.h>

/*****************************************************************************
 * iteration macros
 *
 * X-macro tables: each FOREACH_* macro expands its `macro` argument once
 * per table entry.  They are used below to generate the ompt_state_t and
 * ompt_callbacks_t enumerations, and by the runtime to enumerate entry
 * points.
 *****************************************************************************/

#define FOREACH_OMPT_INQUIRY_FN(macro)  \
    macro (ompt_enumerate_states)       \
                                        \
    macro (ompt_set_callback)           \
    macro (ompt_get_callback)           \
                                        \
    macro (ompt_get_state)              \
                                        \
    macro (ompt_get_parallel_info)      \
    macro (ompt_get_task_info)          \
    macro (ompt_get_thread_data)        \
    macro (ompt_get_unique_id)

#define FOREACH_OMPT_PLACEHOLDER_FN(macro)  \
    macro (ompt_idle)                       \
    macro (ompt_overhead)                   \
    macro (ompt_barrier_wait)               \
    macro (ompt_task_wait)                  \
    macro (ompt_mutex_wait)

#define FOREACH_OMPT_STATE(macro)                                                               \
                                                                                                \
    /* first */                                                                                 \
    macro (ompt_state_first, 0x71)            /* initial enumeration state */                   \
                                                                                                \
    /* work states (0..15) */                                                                   \
    macro (ompt_state_work_serial, 0x00)      /* working outside parallel */                    \
    macro (ompt_state_work_parallel, 0x01)    /* working within parallel */                     \
    macro (ompt_state_work_reduction, 0x02)   /* performing a reduction */                      \
                                                                                                \
    /* idle (16..31) */                                                                         \
    macro (ompt_state_idle, 0x10)             /* waiting for work */                            \
                                                                                                \
    /* overhead states (32..63) */                                                              \
    macro (ompt_state_overhead, 0x20)         /* overhead excluding wait states */              \
                                                                                                \
    /* barrier wait states (64..79) */                                                          \
    macro (ompt_state_wait_barrier, 0x40)     /* waiting at a barrier */                        \
    macro (ompt_state_wait_barrier_implicit, 0x41)  /* implicit barrier */                      \
    macro (ompt_state_wait_barrier_explicit, 0x42)  /* explicit barrier */                      \
                                                                                                \
    /* task wait states (80..95) */                                                             \
    macro (ompt_state_wait_taskwait, 0x50)    /* waiting at a taskwait */                       \
    macro (ompt_state_wait_taskgroup, 0x51)   /* waiting at a taskgroup */                      \
                                                                                                \
    /* mutex wait states (96..111) */                                                           \
    macro (ompt_state_wait_lock, 0x60)        /* waiting for lock */                            \
    macro (ompt_state_wait_nest_lock, 0x61)   /* waiting for nest lock */                       \
    macro (ompt_state_wait_critical, 0x62)    /* waiting for critical */                        \
    macro (ompt_state_wait_atomic, 0x63)      /* waiting for atomic */                          \
    macro (ompt_state_wait_ordered, 0x64)     /* waiting for ordered */                         \
    macro (ompt_state_wait_single, 0x6F)      /* waiting for single region (non-standard!) */   \
                                                                                                \
    /* misc (112..127) */                                                                       \
    macro (ompt_state_undefined, 0x70)        /* undefined thread state */

/* NOTE: the last entry of this table must NOT end with a '\' line
 * continuation; a continuation there splices the text that follows the
 * #define into the macro definition itself. */
#define FOREACH_OMPT_EVENT(macro)                                                                                       \
                                                                                                                        \
    /*--- Mandatory Events ---*/                                                                                        \
    macro (ompt_callback_thread_begin, ompt_callback_thread_begin_t, 1)   /* thread begin */                            \
    macro (ompt_callback_thread_end, ompt_callback_thread_end_t, 2)       /* thread end */                              \
                                                                                                                        \
    macro (ompt_callback_parallel_begin, ompt_callback_parallel_begin_t, 3) /* parallel begin */                        \
    macro (ompt_callback_parallel_end, ompt_callback_parallel_end_t, 4)     /* parallel end */                          \
                                                                                                                        \
    macro (ompt_callback_task_create, ompt_callback_task_create_t, 5)       /* task begin */                            \
    macro (ompt_callback_task_schedule, ompt_callback_task_schedule_t, 6)   /* task schedule */                         \
    macro (ompt_callback_implicit_task, ompt_callback_implicit_task_t, 7)   /* implicit task */                         \
                                                                                                                        \
    /*macro (ompt_callback_target, ompt_callback_target_t, 8)*/             /* target */                                \
    /*macro (ompt_callback_target_data_op ompt_callback_target_data_op_t, 9)*/ /* target data op*/                      \
    /*macro (ompt_callback_target_submit, ompt_callback_target_submit_t, 10)*/ /* target submit*/                       \
                                                                                                                        \
    /*macro (ompt_callback_control_tool, ompt_callback_control_tool_t, 11)*/ /* control tool */                         \
                                                                                                                        \
    /*macro (ompt_callback_device_initialize, ompt_callback_device_initialize_t, 12)*/ /* device initialize */          \
                                                                                                                        \
    /*--- Optional Events (blame shifting, ompt_event_unimplemented) ---*/                                              \
    macro (ompt_callback_idle, ompt_callback_idle_t, 13)                    /* begin or end idle state */               \
                                                                                                                        \
    macro (ompt_callback_sync_region_wait, ompt_callback_sync_region_t, 14) /* sync region wait begin or end*/          \
                                                                                                                        \
    macro (ompt_callback_mutex_released, ompt_callback_mutex_t, 15)         /* mutex released */                        \
                                                                                                                        \
    /*--- Optional Events (synchronous events, ompt_event_unimplemented) --- */                                         \
                                                                                                                        \
    macro (ompt_callback_task_dependences, ompt_callback_task_dependences_t, 16) /* report task dependences */          \
    macro (ompt_callback_task_dependence, ompt_callback_task_dependence_t, 17)   /* report task dependence */           \
                                                                                                                        \
    macro (ompt_callback_work, ompt_callback_work_t, 18)                    /* task at work begin or end*/              \
                                                                                                                        \
    macro (ompt_callback_master, ompt_callback_master_t, 19)                /* task at master begin or end */           \
                                                                                                                        \
    /*macro (ompt_callback_target_map, ompt_callback_target_map_t, 20)*/    /* target map */                            \
                                                                                                                        \
    macro (ompt_callback_sync_region, ompt_callback_sync_region_t, 21)      /* sync region begin or end */              \
                                                                                                                        \
    macro (ompt_callback_lock_init, ompt_callback_lock_init_t, 22)          /* lock init */                             \
    macro (ompt_callback_lock_destroy, ompt_callback_lock_destroy_t, 23)    /* lock destroy */                          \
                                                                                                                        \
    macro (ompt_callback_mutex_acquire, ompt_callback_mutex_acquire_t, 24)  /* mutex acquire */                         \
    macro (ompt_callback_mutex_acquired, ompt_callback_mutex_t, 25)         /* mutex acquired */                        \
                                                                                                                        \
    macro (ompt_callback_nest_lock, ompt_callback_nest_lock_t, 26)          /* nest lock */                             \
                                                                                                                        \
    macro (ompt_callback_flush, ompt_callback_flush_t, 27)                  /* after executing flush */                 \
                                                                                                                        \
    macro (ompt_callback_cancel, ompt_callback_cancel_t, 28)                /*cancel innermost binding region*/         \
                                                                                                                        \
                                                                                                                        \
    /*--- Old Events --- */                                                                                             \
    macro (ompt_event_control, ompt_control_callback_t, 29)                 /* support control calls */                 \
                                                                                                                        \
    /*--- Custom Events (part of extensions to generate grain graphs ---*/                                              \
    macro (ext_callback_task_create_begin, ext_callback_task_create_begin_t, 80) /* end of task creation */             \
    macro (ext_callback_loop, ext_callback_loop_t, 81)                      /* loop construct */                        \
    macro (ext_callback_chunk, ext_callback_chunk_t, 82)                    /* chunk is scheduled */

/*****************************************************************************
 * data types
 *****************************************************************************/

/*---------------------
 * identifiers
 *---------------------*/

typedef uint64_t ompt_id_t;

/* A cell of tool data: either a runtime-assigned unique id or a pointer
 * owned by the tool. */
typedef union ompt_data_u {
    ompt_id_t value;                    /* data initialized by runtime to unique id */
    void *ptr;                          /* pointer under tool control */
} ompt_data_t;

typedef ompt_data_t ompt_thread_data_t;
#define ompt_thread_id_none {.value=0}    /* non-standard */

typedef ompt_data_t ompt_task_data_t;
//#define ompt_task_id_none ((ompt_task_data_t) 0)    /* non-standard */
#define ompt_task_id_none {.value=0}      /* non-standard */

typedef ompt_data_t ompt_parallel_data_t;
//#define ompt_parallel_id_none ((ompt_parallel_data_t) 0)    /* non-standard */
#define ompt_parallel_id_none {.value=0}  /* non-standard */

typedef uint64_t ompt_wait_id_t;
#define ompt_wait_id_none ((ompt_wait_id_t) 0)    /* non-standard */

/*---------------------
 * ompt_frame_t
 *---------------------*/

typedef struct ompt_frame_s {
    void *exit_runtime_frame;           /* next frame is user code */
    void *reenter_runtime_frame;        /* previous frame is user code */
} ompt_frame_t;

/*---------------------
 * dependences types
 *---------------------*/

typedef enum ompt_task_dependence_flag_e {
    // a two bit field for the dependence type
    ompt_task_dependence_type_out   = 1,
    ompt_task_dependence_type_in    = 2,
    ompt_task_dependence_type_inout = 3,
} ompt_task_dependence_flag_t;

typedef struct ompt_task_dependence_s {
    void *variable_addr;
    uint32_t dependence_flags;
} ompt_task_dependence_t;

/*****************************************************************************
 * enumerations for thread states and runtime events
 *****************************************************************************/

/*---------------------
 * runtime states
 *---------------------*/

typedef enum {
#define ompt_state_macro(state, code) state = code,
    FOREACH_OMPT_STATE(ompt_state_macro)
#undef ompt_state_macro
} ompt_state_t;

/*---------------------
 * runtime events
 *---------------------*/

typedef enum ompt_callbacks_e{
#define ompt_event_macro(event, callback, eventid) event = eventid,
    FOREACH_OMPT_EVENT(ompt_event_macro)
#undef ompt_event_macro
} ompt_callbacks_t;

/*---------------------
 * set callback results
 *---------------------*/

/*typedef enum {
    ompt_set_result_registration_error              = 0,
    ompt_set_result_event_may_occur_no_callback     = 1,
    ompt_set_result_event_never_occurs              = 2,
    ompt_set_result_event_may_occur_callback_some   = 3,
    ompt_set_result_event_may_occur_callback_always = 4,
} ompt_set_result_t;
*/

typedef enum ompt_set_result_e {
    ompt_set_error = 0,
    ompt_set_never = 1,
    ompt_set_sometimes = 2,
    ompt_set_sometimes_paired = 3,
    ompt_set_always = 4
} ompt_set_result_t;

/*****************************************************************************
 * callback signatures
 *****************************************************************************/

/* initialization */
typedef void (*ompt_interface_fn_t)(void);

typedef ompt_interface_fn_t (*ompt_function_lookup_t)(
    const char *                        /* entry point to look up */
);

/* threads */
typedef void (*ompt_thread_callback_t) (
    ompt_thread_data_t thread_data      /* data of thread */
);

typedef enum {
    ompt_thread_initial = 1,            // start the enumeration at 1
    ompt_thread_worker = 2,
    ompt_thread_other = 3
} ompt_thread_type_t;

typedef enum {
    ompt_invoker_program = 0,           /* program invokes master task */
    ompt_invoker_runtime = 1            /* runtime invokes master task */
} ompt_invoker_t;

typedef void (*ompt_callback_thread_begin_t) (
    ompt_thread_type_t thread_type,     /* type of thread */
    ompt_data_t *thread_data            /* data of thread */
);

typedef void (*ompt_callback_thread_end_t) (
    ompt_data_t *thread_data            /* data of thread */
);

typedef void (*ompt_thread_type_callback_t) (
    ompt_thread_type_t thread_type,     /* type of thread */
    ompt_thread_data_t thread_data      /* data of thread */
);

typedef void (*ompt_wait_callback_t) (
    ompt_wait_id_t wait_id              /* wait data */
);

/* parallel and workshares */
typedef void (*ompt_parallel_callback_t) (
    ompt_parallel_data_t parallel_data, /* data of parallel region */
    ompt_task_data_t task_data          /* data of task */
);

typedef enum ompt_scope_endpoint_e {
    ompt_scope_begin = 1,
    ompt_scope_end = 2
} ompt_scope_endpoint_t;

/* implicit task */
typedef void (*ompt_callback_implicit_task_t) (
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    unsigned int team_size,
    unsigned int thread_num
);

typedef void (*ompt_new_workshare_callback_t) (
    ompt_parallel_data_t parallel_data, /* data of parallel region */
    ompt_task_data_t parent_task_data,  /* data of parent task */
    void *workshare_function            /* pointer to outlined function */
);

typedef void (*ompt_callback_parallel_begin_t) (
    ompt_data_t *parent_task_data,      /* data of parent task */
    const ompt_frame_t *parent_frame,   /* frame data of parent task */
    ompt_data_t *parallel_data,         /* data of parallel region */
    unsigned int requested_team_size,   /* requested number of threads in team */
//  unsigned int actual_team_size,      /* actual number of threads in team */
    ompt_invoker_t invoker,             /* who invokes master task? */
    const void *codeptr_ra
);

typedef void (*ompt_callback_parallel_end_t) (
    ompt_data_t *parallel_data,         /* data of parallel region */
    ompt_task_data_t *task_data,        /* data of task */
    ompt_invoker_t invoker,             /* who invokes master task? */
    const void *codeptr_ra
);

/* tasks */
typedef enum ompt_task_type_e {
    ompt_task_initial = 1,
    ompt_task_implicit = 2,
    ompt_task_explicit = 3,
    ompt_task_target = 4
} ompt_task_type_t;

typedef enum ompt_task_status_e {
    ompt_task_complete = 1,
    ompt_task_yield = 2,
    ompt_task_cancel = 3,
    ompt_task_others = 4
} ompt_task_status_t;

typedef void (*ompt_callback_task_schedule_t) (
    ompt_task_data_t *first_task_data,
    ompt_task_status_t prior_task_status,
    ompt_task_data_t *second_task_data
);

typedef void (*ompt_callback_task_create_t) (
    ompt_data_t *parent_task_data,      /* data of parent task */
    const ompt_frame_t *parent_frame,   /* frame data for parent task */
    ompt_data_t *new_task_data,         /* data of created task */
    ompt_task_type_t type,
    int has_dependences,
    const void *codeptr_ra
);

/* PVL: custom type */
typedef void (*ext_callback_task_create_begin_t) (
    ompt_data_t *parent_task_data,
    const ompt_frame_t *parent_frame,
    ompt_task_type_t type
);

/* task dependences */
typedef void (*ompt_callback_task_dependences_t) (
    ompt_data_t *task_data,
    const ompt_task_dependence_t *deps,
    int ndeps
);

typedef void (*ompt_callback_task_dependence_t) (
    ompt_data_t *src_task_data,
    ompt_data_t *sink_task_data
);

/* program */
typedef void (*ompt_control_callback_t) (
    uint64_t command,                   /* command of control call */
    uint64_t modifier                   /* modifier of control call */
);

typedef void (*ompt_callback_t)(void);

typedef enum ompt_mutex_kind_e {
    ompt_mutex = 0x10,
    ompt_mutex_lock = 0x11,
    ompt_mutex_nest_lock = 0x12,
    ompt_mutex_critical = 0x13,
    ompt_mutex_atomic = 0x14,
    ompt_mutex_ordered = 0x20
} ompt_mutex_kind_t;

typedef void (*ompt_callback_mutex_acquire_t) (
    ompt_mutex_kind_t kind,
    unsigned int hint,
    unsigned int impl,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra
);

typedef void (*ompt_callback_mutex_t) (
    ompt_mutex_kind_t kind,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra
);

typedef void (*ompt_callback_lock_init_t) (
    ompt_mutex_kind_t kind,
    unsigned int hint,
    unsigned int impl,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra
);

typedef void (*ompt_callback_lock_destroy_t) (
    ompt_mutex_kind_t kind,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra
);

typedef void (*ompt_callback_nest_lock_t) (
    ompt_scope_endpoint_t endpoint,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra
);

typedef void (*ompt_callback_master_t) (
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    const void *codeptr_ra
);

typedef void (*ompt_callback_idle_t) (
    ompt_scope_endpoint_t endpoint
);

typedef enum ompt_work_type_e {
    ompt_work_loop = 1,
    ompt_work_sections = 2,
    ompt_work_single_executor = 3,
    ompt_work_single_other = 4,
    ompt_work_workshare = 5,
    ompt_work_distribute = 6,
    ompt_work_taskloop = 7
} ompt_work_type_t;

typedef void (*ompt_callback_work_t) (
    ompt_work_type_t wstype,
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    uint64_t count,
    const void *codeptr_ra
);

/* PVL: custom type */
typedef enum ext_loop_sched_e {
    ext_loop_sched_static = 1,
    ext_loop_sched_dynamic = 2,
    ext_loop_sched_guided = 3
} ext_loop_sched_t;

/* PVL: custom type.  Two variants: the OMPT_STATIC_CHUNKS build drops the
 * per-loop bounds/chunk arguments. */
#ifndef OMPT_STATIC_CHUNKS
typedef void (*ext_callback_loop_t) (
    ext_loop_sched_t loop_sched,
    ompt_scope_endpoint_t endpoint,
    ompt_data_t * parallel_data,        // The parallel region
    ompt_data_t * task_data,            // The implicit task of the worker
    int64_t lower,                      // Lower iteration bound
    int64_t upper,                      // Upper iteration bound
    int64_t step,                       // Increment
    uint64_t chunk_size,                // Chunk size
    int64_t thread_lower,               // Lower iter. bound for thread
    const void * codeptr_ra
);
#else // OMPT_STATIC_CHUNKS is defined
typedef void (*ext_callback_loop_t) (
    ext_loop_sched_t loop_sched,
    ompt_scope_endpoint_t endpoint,
    ompt_data_t * parallel_data,        // The parallel region
    ompt_data_t * task_data,            // The implicit task of the worker
    int64_t step,                       // Increment
    const void * codeptr_ra
);
#endif // OMPT_STATIC_CHUNKS

/* PVL: custom type */
typedef void (*ext_callback_chunk_t) (
    ompt_data_t *task_data,
    int64_t lower,
    int64_t upper,
    int last_chunk                      // Is scheduled chunk last for thread?
);

typedef enum ompt_sync_region_kind_e {
    ompt_sync_region_barrier = 1,
    ompt_sync_region_taskwait = 2,
    ompt_sync_region_taskgroup = 3
} ompt_sync_region_kind_t;

typedef void (*ompt_callback_sync_region_t) (
    ompt_sync_region_kind_t kind,
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    const void *codeptr_ra
);

typedef enum ompt_cancel_flag_e {
    ompt_cancel_parallel = 0x1,
    ompt_cancel_sections = 0x2,
    ompt_cancel_do = 0x4,
    ompt_cancel_taskgroup = 0x8,
    ompt_cancel_activated = 0x10,
    ompt_cancel_detected = 0x20,
    ompt_cancel_discarded_task = 0x40
} ompt_cancel_flag_t;

typedef void (*ompt_callback_cancel_t) (
    ompt_data_t *task_data,
    int flags,
    const void *codeptr_ra
);

typedef void (*ompt_callback_flush_t) (
    ompt_data_t *thread_data,
    const void *codeptr_ra
);

/****************************************************************************
 * ompt API
 ***************************************************************************/

#ifdef  __cplusplus
extern "C" {
#endif

#define OMPT_API_FNTYPE(fn) fn##_t

#define OMPT_API_FUNCTION(return_type, fn, args)  \
    typedef return_type (*OMPT_API_FNTYPE(fn)) args

/****************************************************************************
 * INQUIRY FUNCTIONS
 ***************************************************************************/

/* state */
OMPT_API_FUNCTION(ompt_state_t, ompt_get_state, (
    ompt_wait_id_t *wait_id
));

/* thread */
OMPT_API_FUNCTION(ompt_thread_data_t*, ompt_get_thread_data, (void));

/* parallel region */
OMPT_API_FUNCTION(int, ompt_get_parallel_info, (
    int ancestor_level,
    ompt_data_t **parallel_data,
    int *team_size
));

/* task */
OMPT_API_FUNCTION(int, ompt_get_task_info, (
    int ancestor_level,
    ompt_task_type_t *type,
    ompt_data_t **task_data,
    ompt_frame_t **task_frame,
    ompt_data_t **parallel_data,
    int *thread_num
));

/****************************************************************************
 * PLACEHOLDERS FOR PERFORMANCE REPORTING
 ***************************************************************************/

/* idle */
OMPT_API_FUNCTION(void, ompt_idle, (
    void
));

/* overhead */
OMPT_API_FUNCTION(void, ompt_overhead, (
    void
));

/* barrier wait */
OMPT_API_FUNCTION(void, ompt_barrier_wait, (
    void
));

/* task wait */
OMPT_API_FUNCTION(void, ompt_task_wait, (
    void
));

/* mutex wait */
OMPT_API_FUNCTION(void, ompt_mutex_wait, (
    void
));

/****************************************************************************
 * INITIALIZATION FUNCTIONS
 ***************************************************************************/

typedef struct ompt_fns_t ompt_fns_t;

OMPT_API_FUNCTION(int, ompt_initialize, (
    ompt_function_lookup_t ompt_fn_lookup,
    ompt_fns_t *fns
));

OMPT_API_FUNCTION(void, ompt_finalize, (
    ompt_fns_t *fns
));

typedef struct ompt_fns_t {
    ompt_initialize_t initialize;
    ompt_finalize_t finalize;
} ompt_fns_t;

/* initialization interface to be defined by tool */
ompt_fns_t * ompt_start_tool(
    unsigned int omp_version,
    const char * runtime_version
);

/* NOTE: the enum tag reads "opt_init_mode_e" (missing 'm'); kept as-is
 * because the tag is part of the public interface. */
typedef enum opt_init_mode_e {
    ompt_init_mode_never  = 0,
    ompt_init_mode_false  = 1,
    ompt_init_mode_true   = 2,
    ompt_init_mode_always = 3
} ompt_init_mode_t;

OMPT_API_FUNCTION(int, ompt_set_callback, (
    ompt_callbacks_t event,
    ompt_callback_t callback
));

typedef enum ompt_set_callback_rc_e {   /* non-standard */
    ompt_set_callback_error      = 0,
    ompt_has_event_no_callback   = 1,
    ompt_no_event_no_callback    = 2,
    ompt_has_event_may_callback  = 3,
    ompt_has_event_must_callback = 4,
} ompt_set_callback_rc_t;

OMPT_API_FUNCTION(int, ompt_get_callback, (
    ompt_callbacks_t event,
    ompt_callback_t *callback
));

/****************************************************************************
 * MISCELLANEOUS FUNCTIONS
 ***************************************************************************/

/* control */
// FIXME: remove workaround for clang
#if !defined(__clang__) && defined(_OPENMP) && (_OPENMP >= 201307)
#pragma omp declare target
#endif
void ompt_control(
    uint64_t command,
    uint64_t modifier
);
#if !defined(__clang__) && defined(_OPENMP) && (_OPENMP >= 201307)
#pragma omp end declare target
#endif

/* state enumeration */
OMPT_API_FUNCTION(int, ompt_enumerate_states, (
    int current_state,
    int *next_state,
    const char **next_state_name
));

/* get_unique_id */
OMPT_API_FUNCTION(uint64_t, ompt_get_unique_id, (void));

#ifdef  __cplusplus
}
#endif

#endif
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 2048 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/

/*
  EdgeInfo: one monotonic (in y) run of a polygon outline, the unit the
  scanline rasterizer sorts and walks.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;           /* x/y extent of this edge's points */

  double
    scanline;         /* last scanline processed (-1.0 once queued) */

  PointInfo
    *points;          /* vertex list; heap-owned, y-ascending */

  size_t
    number_points;

  ssize_t
    direction;        /* 1 if original winding was downward, else 0 */

  MagickBooleanType
    ghostline;        /* edge closes an open subpath; not painted */

  size_t
    highwater;        /* resume index for incremental scanline search */
} EdgeInfo;

/*
  ElementInfo: center/axes/rotation of an ellipse-like element.
*/
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  MVGInfo: shared growable PrimitiveInfo buffer passed through the MVG
  tracing helpers (Trace*).
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;  /* indirect so helpers may reallocate the array */

  size_t
    *extent;           /* current allocated length of *primitive_info */

  ssize_t
    offset;            /* next free slot */

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/*
  PolygonInfo: the sorted-edge form produced by ConvertPathToPolygon().
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

typedef enum
{
  MoveToCode,
  OpenCode,       /* moveto that begins an unclosed subpath */
  GhostlineCode,  /* moveto of the synthetic closing line (not painted) */
  LineToCode,
  EndCode
} PathInfoCode;

/*
  PathInfo: one vertex of the vector-path form produced by
  ConvertPrimitiveToPath().
*/
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);

static ssize_t
  TracePath(MVGInfo *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /* AcquireCriticalMemory() aborts on failure, so no NULL check is needed */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from defaults, then deep-copy every field of draw_info: strings via
    CloneString(), images via CloneImage(), plain members by assignment.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);  /* no source: caller gets the defaults */
  exception=AcquireExceptionInfo();
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        dash_pattern is terminated by a near-zero entry: count x live entries,
        allocate padded zeroed storage (2*x+2), and copy x+1 values so the
        terminator travels with the pattern.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /* gradient was copied shallowly above; now deep-copy its stops array */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPathToPolygon returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: orders edges by first-point y, then x, then by slope
  (via cross product, avoiding division), then by second point.  Points are
  pre-sorted y-ascending within each edge, so this yields scanline order.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/* Debug helper: dump every edge and its points to the Draw event log. */
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}

/* Reverse a point list in place (used to normalize edges to y-ascending). */
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  register ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form: split the
    path into y-monotonic edges (an edge is flushed whenever the vertical
    direction flips or a new subpath starts), then sort them for the
    scanline rasterizer.
    NOTE(review): on allocation failure mid-way, previously acquired edge
    point lists and polygon_info itself are not released before returning
    NULL -- confirm callers treat this as fatal.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /* seed slot 0 so the structure is consistent even for an empty path */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush any accumulated edge, then start a fresh point list.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);  /* normalize to y-ascending */
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;  /* ownership moved into the edge */
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.  Direction: +1 if this segment advances in y (ties broken by
      x), -1 otherwise; an edge ends when the direction flips.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the new edge starts at the last point of the old one.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* grow the current point list geometrically */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);  /* degenerate */
      else
        {
          /*
            Flush the trailing edge.
          */
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P r i m i t i v e T o P a t h                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPrimitiveToPath returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/

/* Debug helper: dump the vector path to the Draw event log. */
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ?
      "lineto" : "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,      /* first point of the current subpath */
    q;      /* previous emitted point (for duplicate elimination) */

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
    Primitives with no path representation return NULL.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  /* worst case: every input point plus a ghostline pair per subpath */
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed, and append a
      ghostline back to the subpath start so rasterization still closes it.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* trim the over-allocation down to what was actually used */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every owned member, poison the signature, then free the
    structure.  Always returns NULL so callers can reassign in one step.
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  draw_info->signature=(~MagickCoreSignature);  /* detect use-after-free */
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y E d g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
{
  EdgeInfo
    *edges;

  size_t
    remaining;

  /*
    Release the edge's point list, then close the gap in the edge array by
    shifting the trailing edges down one slot.  Returns the number of edges
    left in the polygon.
  */
  assert(edge < polygon_info->number_edges);
  edges=polygon_info->edges;
  edges[edge].points=(PointInfo *) RelinquishMagickMemory(edges[edge].points);
  remaining=(--polygon_info->number_edges);
  if (edge < remaining)
    (void) memmove(edges+edge,edges+edge+1,(size_t) (remaining-edge)*
      sizeof(*edges));
  return(remaining);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P o l y g o n I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  /*
    Free each edge's point list, the edge array, and finally the structure
    itself; always returns NULL for one-step caller reassignment.
  */
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      register ssize_t
        i;

      for (i=(ssize_t) polygon_info->number_edges-1; i >= 0; i--)
        polygon_info->edges[i].points=(PointInfo *)
          RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D r a w A f f i n e I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  AffineEdge(): clip the scanline-y span [edge->x1,edge->x2] against the
  inverse image of the destination's column/row bounds under the affine
  transform.  Returns an empty span (x2 < x1) when the scanline misses the
  transformed source entirely.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* negative x-scale: the two intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: the whole scanline maps to one column; reject if outside */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;  /* empty span */
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  InverseAffineMatrix(): closed-form 2x3 affine inverse.
  PerceptibleReciprocal() guards against a singular (zero-determinant)
  transform instead of dividing by zero.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: forward-transform the four source corners and
    take the min/max, clipped to the destination.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: walk destination scanlines, map each pixel back
    through the inverse transform, interpolate the source, composite over.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* scanline misses the transformed source */
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;  /* NOTE(review): incremented but never read in this loop */
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t)
         floor(inverse_edge.x2+0.5); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w B o u n d i n g R e c t a n g l e s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DrawBoundingRectangles(): debug aid that strokes the bounding rectangle of
  each polygon edge (red for one direction, green for the other) and the
  overall polygon bounds (blue) onto the image.  Rectangles are inflated by
  half the stroke width ("mid") so the stroke itself is enclosed.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Default to 96 DPI unless the draw info carries an explicit density;
    resolution scales the stroke-width inflation below.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;  /* density "N" means NxN */
    }
  /* half the effective (affine-scaled) stroke width, in pixels */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounds, inflated by mid and clamped to the image.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        One rectangle per edge, stroke color keyed to the edge direction.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,
            &clone_info->stroke,exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,
            &clone_info->stroke,exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* an edge failed to draw above; bail with the accumulated status */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Finally stroke the overall bounds in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /* the MVG text of the clip path is stored as an image artifact under id */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  /* install the rendered mask as the image's write mask */
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w C l i p p i n g M a s k                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
% */ static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, const char *id,const char *clip_path,ExceptionInfo *exception) { DrawInfo *clone_info; Image *clip_mask, *separate_mask; MagickStatusType status; /* Draw a clip path. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); clip_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(clip_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(clip_mask)); status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception); status=QueryColorCompliance("#0000",AllCompliance, &clip_mask->background_color,exception); clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha; clip_mask->background_color.alpha_trait=BlendPixelTrait; status=SetImageBackgroundColor(clip_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,clip_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); if (clone_info->clip_mask != (char *) NULL) clone_info->clip_mask=DestroyString(clone_info->clip_mask); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; clone_info->clip_path=MagickTrue; status=RenderMVGContent(clip_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(clip_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { clip_mask=DestroyImage(clip_mask); clip_mask=separate_mask; status=NegateImage(clip_mask,MagickFalse,exception); if (status == MagickFalse) clip_mask=DestroyImage(clip_mask); } if (status == 
MagickFalse) clip_mask=DestroyImage(clip_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(clip_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C o m p o s i t e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawCompositeMask() draws the mask path and returns it as an image mask. % % The format of the DrawCompositeMask method is: % % Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *mask_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the mask path id. % % o mask_path: the mask path. % % o exception: return any errors or warnings in this structure. % */ static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, const char *id,const char *mask_path,ExceptionInfo *exception) { Image *composite_mask, *separate_mask; DrawInfo *clone_info; MagickStatusType status; /* Draw a mask path. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); composite_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(composite_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(composite_mask)); status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL, exception); status=QueryColorCompliance("#0000",AllCompliance, &composite_mask->background_color,exception); composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha; composite_mask->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(composite_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,mask_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; status=RenderMVGContent(composite_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(composite_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { composite_mask=DestroyImage(composite_mask); composite_mask=separate_mask; status=NegateImage(composite_mask,MagickFalse,exception); if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); } if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path"); return(composite_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w D a s h 
P o l y g o n                                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,  /* next free slot in dash_polygon; dash segment under construction */
    n;  /* current index into draw_info->dash_pattern[] */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /* count vertices up to the UndefinedPrimitive sentinel */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /* worst case: each vertex can open and close a dash, plus slack */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance n through the pattern until the offset
    is exhausted, leaving `length` as the remainder of the current entry.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon segment, carving it into alternating pen-down
    (even n) and pen-up (odd n) pieces of the dash pattern.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > (MaxBezierCoordinates >> 2))
      break;  /* guard against absurd segment lengths */
    if (fabs(length) < MagickEpsilon)
      {
        /* current pattern entry exhausted; step (and wrap) the pattern */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
      (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* odd entry = gap: restart the dash at the interpolated point */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* even entry = dash: close the segment and stroke it */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unconsumed remainder of this entry into the next segment */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      /* flush the final, partially-built dash */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w G r a d i e n t I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetStopColorOffset() returns the (unnormalized for linear, normalized for
  radial) position of pixel (x,y) along the gradient axis, used to index
  the gradient stop list.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      /* scalar projection of (pixel - start) onto the gradient vector */
      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* repeat spread uses the raw distance from the center */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate into the (possibly elliptical) gradient frame and normalize
         by the radii so the unit circle maps to offset 1.0 */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/* qsort comparator: order gradient stops by ascending offset */
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* stops must be sorted by offset for the interval search below */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* seed the row offset; recomputed per pixel except at the origin */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);  /* normalize linear to [0,1] */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* clamp offsets outside [0,1] to the first/last stop color */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* linearly blend the two stops bracketing the offset */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* mirror the offset back and forth across [0,1] */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /* tile the pattern; antialias across the seam between repeats */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* at the seam, blend first and last stop instead */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
% */ static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info, const size_t pad) { double extent; size_t quantum; /* Check if there is enough storage for drawing pimitives. */ extent=(double) mvg_info->offset+pad+PrimitiveExtentPad; quantum=sizeof(**mvg_info->primitive_info); if (((extent*quantum) < (double) SSIZE_MAX) && ((extent*quantum) < (double) GetMaxMemoryRequest())) { if (extent <= (double) *mvg_info->extent) return(MagickTrue); *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory( *mvg_info->primitive_info,(size_t) extent,quantum); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) { register ssize_t i; *mvg_info->extent=(size_t) extent; for (i=mvg_info->offset+1; i < (ssize_t) extent; i++) (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive; return(MagickTrue); } } /* Reallocation failed, allocate a primitive to facilitate unwinding. */ (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory( *mvg_info->primitive_info); *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory( PrimitiveExtentPad*quantum); (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum); *mvg_info->extent=1; return(MagickFalse); } static inline double GetDrawValue(const char *magick_restrict string, char **magick_restrict sentinal) { char **magick_restrict q; double value; q=sentinal; value=InterpretLocaleValue(string,q); if ((IsNaN(value) != 0) || (value < -(SSIZE_MAX-512.0)) || (value > (SSIZE_MAX-512.0))) return(0.0); sentinal=q; return(value); } static int MVGMacroCompare(const void *target,const void *source) { const char *p, *q; p=(const char *) target; q=(const char *) source; return(strcmp(p,q)); } static SplayTreeInfo *GetMVGMacros(const char *primitive) { char *macro, *token; const char *q; size_t extent; SplayTreeInfo *macros; /* 
Scan graphic primitives for definitions and classes. */ if (primitive == (const char *) NULL) return((SplayTreeInfo *) NULL); macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory, RelinquishMagickMemory); macro=AcquireString(primitive); token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; for (q=primitive; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (*token == '\0') break; if (LocaleCompare("push",token) == 0) { register const char *end, *start; (void) GetNextToken(q,&q,extent,token); if (*q == '"') { char name[MagickPathExtent]; const char *p; ssize_t n; /* Named macro (e.g. push graphic-context "wheel"). */ (void) GetNextToken(q,&q,extent,token); start=q; end=q; (void) CopyMagickString(name,token,MagickPathExtent); n=1; for (p=q; *p != '\0'; ) { if (GetNextToken(p,&p,extent,token) < 1) break; if (*token == '\0') break; if (LocaleCompare(token,"pop") == 0) { end=p-strlen(token)-1; n--; } if (LocaleCompare(token,"push") == 0) n++; if ((n == 0) && (end > start)) { /* Extract macro. */ (void) GetNextToken(p,&p,extent,token); (void) CopyMagickString(macro,start,(size_t) (end-start)); (void) AddValueToSplayTree(macros,ConstantString(name), ConstantString(macro)); break; } } } } } token=DestroyString(token); macro=DestroyString(macro); return(macros); } static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=GetDrawValue(point,&p); return((fabs(value) < MagickEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); 
} if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { 
(void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if 
(graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); 
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) 
ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5); if (token == next_token) 
ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops 
== (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { 
status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; 
break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(GetDrawValue(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(GetDrawValue(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || 
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; if (coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) ThrowPointExpectedException(keyword,exception); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case 
ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; 
if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info, ExpandAffine(&graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
  /* Relinquish all parser state: macro table, token buffer, primitives. */
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /* Free any per-primitive text payloads before releasing the array. */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  if (stops != (StopInfo *) NULL)
    stops=(StopInfo *) RelinquishMagickMemory(stops);
  /* Pop any graphic contexts still on the stack (unbalanced push/pop). */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
      keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/* Render the MVG content of draw_info onto image (zero recursion depth). */
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  return(RenderMVGContent(image,draw_info,0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: on return, the rendered pattern image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern MVG path and its geometry are stored as image artifacts
    "<name>" and "<name>-geometry"; both must be present to render.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  /* Start from a fully transparent canvas. */
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* Drop inherited fill/stroke patterns to avoid recursive pattern use. */
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P o l y g o n P r i m i t i v e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPolygonPrimitive() draws a polygon on the image.
%
%  The format of the DrawPolygonPrimitive method is:
%
%      MagickBooleanType DrawPolygonPrimitive(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Release the per-thread polygon rasterizer set (one slot per worker). */
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}

/*
  Build one PolygonInfo per worker thread from the primitive's path so
  scanline rasterization can proceed in parallel without sharing edge state.
*/
static PolygonInfo **AcquirePolygonThreadSet(
  const PrimitiveInfo *primitive_info)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    return((PolygonInfo **) NULL);
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(primitive_info);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  for (i=0; i <
(ssize_t) number_threads; i++)
  {
    polygon_info[i]=ConvertPathToPolygon(path_info);
    /* NOTE(review): path_info appears to leak on this early return; confirm
       whether DestroyPolygonThreadSet is expected to cover it (it is not). */
    if (polygon_info[i] == (PolygonInfo *) NULL)
      return(DestroyPolygonThreadSet(polygon_info));
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}

/*
  Return the fill coverage (0..1) of pixel (x,y) for the polygon, writing the
  stroke coverage to *stroke_alpha; mid is half the stroke width in device
  units.  Uses squared point-to-edge distances for antialiased coverage, then
  the winding number (NonZeroRule or even-odd) for interior classification.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edges are sorted by y; past this scanline nothing else can match. */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* Edge is wholly above the scanline; it will never match again. */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* Remember progress so later pixels on this row skip ahead. */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge (squared distance to the
        segment q..q+1, clamped to its endpoints).
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* Pixels within mid of the edge get (partial) stroke coverage. */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Cross product decides which side of the edge (x,y) falls on. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: inside when the winding number is odd. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}

static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  DisableMSCWarning(4127)
  if (0)
    {
      /* Debug-only branch: visualize edge bounding rectangles. */
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ?
MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /* Union of all edge bounding boxes, padded by the stroke half-width. */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* Clamp the render region to the image raster. */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* Only the single pixel nearest the point gets the fill color. */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Antialiasing off: snap coverage to hard 0/1. */
          fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Emit a human-readable trace of a primitive to the debug log. */
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    point,
    q;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  /* Single-event primitives log one line and return immediately. */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /* Path-like primitives: log each coordinate, flagging duplicates. */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* Start of a new subpath. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    point=primitive_info[i].point;
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* Subpath done: closed if the last point returned to the first. */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}

MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /* Promote gray images to sRGB when fill/stroke colors are not gray. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; 
target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); break; } case ResetMethod: { PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); composite_images=(Image *) NULL; if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, exception); else if (*primitive_info->text != '\0') { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); status&=SetImageInfo(clone_info,0,exception); if ((LocaleNCompare(clone_info->magick,"http",4) == 0) || (LocaleCompare(clone_info->magick,"mpri") == 0)) (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_images=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=MagickFalse; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) 
ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. */ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; status&=TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) status&=SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) status&=DrawAffineImage(image,composite_image,&affine,exception); else status&=CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double) GetPixelAlpha(image,q),q); 
status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status&=DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception); status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. 
%
%  o draw_info: the draw info.
%
%  o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/

/*
  DrawRoundLinecap() stamps a degenerate four-corner polygon -- every corner
  within 2*MagickEpsilon of the cap position -- so a dot is deposited at the
  end of an open stroke, approximating a round line cap.  NOTE(review): the
  rendered result ultimately depends on DrawPolygonPrimitive(), defined
  elsewhere in this file.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /*
    Copy the cap point into the four polygon corners; the fifth entry acts
    only as the list terminator (its primitive field is the sentinel).
  */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

/*
  DrawStrokePolygon() strokes each subpath of primitive_info by tracing its
  stroke outline (TraceStrokePolygon) and filling that outline with the
  stroke color or pattern.  Round line caps are added to open subpaths when
  draw_info->linecap is RoundCap.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon: the clone fills with the stroke color/pattern and
    has stroking itself disabled (transparent stroke, zero width).
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a bare point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last coordinate of this subpath */
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /*
          Open subpath: cap both ends.
        */
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /*
    Zero the whole structure, then set the diagonal scale terms: identity.
  */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes: built-in defaults first, then settings
    carried over from image_info, then per-image options.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /*
    RGBA shorthand: "#000F" is opaque black fill, "#FFF0" is fully
    transparent (white) stroke.
  */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Settings inherited from the (cloned) image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Per-image options override the defaults above.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /*
        "weight" may be a symbolic name (bold, normal, ...) or a raw number.
      */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the
permutation of the (n,k).
%
%  The format of the Permutate method is:
%
%      double Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n: total number of items.
%
%    o k: number of items chosen.
%
%
*/
/*
  Despite the name, this evaluates the binomial coefficient "n choose k" as
  a double: the first loop accumulates n!/k!, the second divides by (n-k)!.
  TraceBezier() below uses these values as Bernstein-basis weights.
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/*
  TraceArc() renders an 'arc' primitive as an ellipse whose center is the
  midpoint of start/end and whose radii reach from that center to start.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

/*
  TraceArcPath() implements the SVG elliptical-arc path command ('A'/'a'):
  the endpoint parameterization (start, end, radii, x-axis rotation, flags)
  is converted to a center parameterization, then the arc is approximated by
  one cubic Bezier segment per quarter turn.  Degenerate inputs follow the
  SVG out-of-range rules: coincident endpoints become a point, zero radii a
  straight line, and too-small radii are scaled up to span the endpoints.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  /*
    Rotate into the ellipse's axis-aligned frame.
  */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /*
        Radii too small to span the endpoints: scale them up (SVG rule).
      */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /*
    Pick the arc center consistent with the large-arc/sweep flags.
  */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    One cubic Bezier segment per slice of at most 90 degrees.
  */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Un-rotate the control points back to image space and emit the segment.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* land exactly on the requested endpoint */
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceBezier() expands number_coordinates control points at the current
  offset of *mvg_info into a polyline approximating the Bezier curve,
  writing the samples back over the control points.  Sample count scales
  with the curve's extent (capped at BezierQuantum per control point); the
  blend weights come from the Bernstein basis (Permutate() above).
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Emit a two-point line primitive; coincident endpoints degenerate to a
    single point primitive instead.
  */
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  /*
    Parse an SVG path data string, one command letter at a time; each case
    below consumes that command's coordinate list.  Uppercase letters use
    absolute coordinates, lowercase are relative to the current point.
  */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
*/ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? 
y : point.y+y); if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. */ do { points[0]=point; for (i=1; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'H' ? 
x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. 
*/ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? 
y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. 
*/ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. 
*/ point=start; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); primitive_info->closed_subpath=MagickTrue; number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token,exception); break; } } } if (status == MagickFalse) return(-1); primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return((ssize_t) number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=start.x; point.y=end.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=end.x; point.y=start.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info, const PointInfo start,const PointInfo 
end,PointInfo arc) { PointInfo degrees, point, segment; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; ssize_t offset; offset=mvg_info->offset; segment.x=fabs(end.x-start.x); segment.y=fabs(end.y-start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0; return(MagickTrue); } if (arc.x > (0.5*segment.x)) arc.x=0.5*segment.x; if (arc.y > (0.5*segment.y)) arc.y=0.5*segment.y; point.x=start.x+segment.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+segment.x-arc.x; point.y=start.y+segment.y-arc.y; degrees.x=0.0; degrees.y=90.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+segment.y-arc.y; degrees.x=90.0; degrees.y=180.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse) return(MagickFalse); p+=p->coordinates; mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { 
p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((pad_p) > MaxBezierCoordinates) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } 
\ if ((pad_q) > MaxBezierCoordinates) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = {0.0, 0.0}, dy = {0.0, 0.0}, inverse_slope = {0.0, 0.0}, slope = {0.0, 0.0}, theta = {0.0, 0.0}; /* Allocate paths. */ number_vertices=primitive_info->coordinates; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return((PrimitiveInfo *) NULL); (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x; offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y; closed_path=(fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? 
MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. */ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? 
-1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. 
*/ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) 
((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } CheckPathExtent(MaxStrokePad,MaxStrokePad); dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; 
stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
hermv_c_bsr_n_lo_trans.c
#include<string.h>
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"

/*
 * Sparse Hermitian matrix-vector update  y := alpha * op(A) * x + beta * y
 * for a square BSR (block compressed sparse row) matrix A of which only the
 * lower triangle is stored.  (Per the file name this is the complex,
 * lower-triangle, transposed-op variant -- the exact op/conjugation semantics
 * live in alpha_madde/alpha_madde_2c in alphasparse/kernel.h; TODO confirm.)
 *
 * Strategy: block rows are partitioned across threads by nonzero count; each
 * thread accumulates its contributions into a private full-length vector
 * tmp[tid], applying both the stored lower-triangle entry and its mirrored
 * Hermitian counterpart, and a final parallel reduction folds the per-thread
 * vectors into y.
 *
 * Parameters:
 *   alpha, beta - scalars of the usual BLAS-style update
 *   A           - square BSR matrix (block_size x block_size dense blocks)
 *   x           - input vector, length A->cols * A->block_size
 *   y           - in/out vector, length A->rows * A->block_size
 * Returns ALPHA_SPARSE_STATUS_SUCCESS, or
 *         ALPHA_SPARSE_STATUS_INVALID_VALUE when A is not square in blocks.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows * A->block_size;  /* scalar row count (only used by the commented-out assert) */
    const ALPHA_INT n = A->cols * A->block_size;  /* scalar column count (likewise unused below) */
    const ALPHA_INT bs = A->block_size;
    const ALPHA_INT bs2 = bs * bs;                /* elements per dense block */
    // assert(m==n);
    ALPHA_INT b_rows = A->rows;
    ALPHA_INT b_cols = A->cols;
    if (b_rows != b_cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE; /* Hermitian implies square */
    ALPHA_INT thread_num = alpha_get_thread_num();
    /* partition[t] .. partition[t+1] is the half-open block-row range owned by
       thread t, balanced by nonzero count so threads do comparable work. */
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);
    /* One private accumulation vector per thread.
       NOTE(review): neither this malloc nor the per-thread ones below are
       checked for NULL -- confirm whether the project guarantees allocation. */
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_m_s = partition[tid];     /* first block row owned by this thread */
        const ALPHA_INT local_m_e = partition[tid + 1]; /* one past the last block row owned */
        /* Private zero-initialized accumulator covering ALL rows: mirrored
           (transposed-position) updates may land outside this thread's rows. */
        tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
        memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
        if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
        {
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs; /* scalar row offset of block row br */
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                /* Only blocks with column index <= br (the stored lower
                   triangle) are visited; upper_bound locates the end of that
                   run within the sorted column indices of this block row. */
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;     /* scalar column offset of block column bc */
                    ALPHA_INT a0_idx = ai * bs2; /* offset of this dense block inside A->values */
                    // diagonal block containing diagonal entry
                    if (bc == br)
                    {
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            /* diagonal entry A(row+b_row, col+b_row): applied
                               once, with no mirrored update */
                            alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * (bs + 1)], x[col + b_row]);
                            /* strictly-lower entries of the diagonal block:
                               apply the stored entry (the _2c variant,
                               presumably with conjugation -- TODO confirm)
                               plus its Hermitian mirror */
                            for (ALPHA_INT b_col = 0; b_col < b_row; b_col++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        /* strictly-lower off-diagonal block: each entry
                           contributes to its own row and, mirrored, to the
                           transposed position's row */
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
        else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
        {
            /* Same traversal as the row-major branch; only the dense-block
               addressing differs: values index is a0_idx + b_col*bs + b_row. */
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs; /* scalar row offset of block row br */
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                /* end of the run of blocks with column index <= br */
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;     /* scalar column offset of block column bc */
                    ALPHA_INT a0_idx = ai * bs2; /* offset of this dense block inside A->values */
                    // diagonal block containing diagonal entry
                    if (bc == br)
                    {
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            /* diagonal entry A(row+b_col, col+b_col) */
                            alpha_madde(tmp[tid][b_col + row], A->values[a0_idx + b_col * (bs + 1)], x[b_col + col]);
                            /* strictly-lower part of the diagonal block
                               (rows below the diagonal within this column) */
                            for (ALPHA_INT b_row = b_col + 1; b_row < bs; b_row++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        /* strictly-lower off-diagonal block: stored entry plus
                           its Hermitian mirror */
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
    }
    /* Reduction: y[i] = beta*y[i] + alpha * sum over threads of tmp[t][i]. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
    {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
        }
        alpha_mul(y[i], y[i], beta);
        alpha_madde(y[i], tmp_y, alpha);
    }
    /* Release the per-thread accumulation buffers. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
ast-dump-openmp-teams-distribute-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target #pragma omp teams distribute simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target #pragma omp teams distribute simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target #pragma omp teams distribute simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target #pragma omp teams distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target #pragma omp teams distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute-simd.c:3:1, line:8:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:34> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:34> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:34> // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | 
| |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | 
|-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:5:1, col:34> // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:34> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:34> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:34> // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // 
CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:12:1, col:34> // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} 
<line:14:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // 
CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | 
`-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:46> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:46> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46> // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 1 // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // 
CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:20:1, col:46> // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 1 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | 
| | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // 
CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | 
`-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:46> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:46> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46> // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2 // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // 
CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 
'long' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:28:1, col:46> // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | 
| | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // 
CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1> // CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19> // CHECK-NEXT: 
|-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:46> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:46> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46> // CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2 // CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | 
`-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} 
<line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | 
| | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} 
<col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 
'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-OMPTeamsDistributeSimdDirective {{.*}} <line:36:1, col:46> // CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2 // CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | 
| | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | 
`-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | `-FieldDecl 
{{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, 
col:21> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 16; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
parallel_block.c
/*
 * Block-parallel local sequence alignment (Smith-Waterman style).
 * The DP matrix is split into T x T blocks; blocks on the same anti-diagonal
 * are independent and are computed in parallel with OpenMP.  Each block keeps
 * only its last row/column (for its right/lower neighbours) and a list of its
 * maxima; the traceback later recomputes blocks on demand (recollect).
 */
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>

#define T 1000   /* block (tile) side length */
/* NOTE(review): this redefines the standard NULL macro from <stdio.h>;
 * it should simply be removed. */
#define NULL 0

long Kol_block_v_stroke,Kol_block_v_stolbce;  /* #blocks per row / per column */
char *String1,*String2;                       /* the two input sequences */
long M,N;                                     /* lengths of String1 / String2 */

/* One entry of the global-maximum list: block coordinates plus the
 * in-block position of a maximal DP cell.
 * NOTE(review): `struct MaxElement` in the next pointer is an undeclared tag,
 * distinct from this anonymous typedef — C compilers accept the resulting
 * pointer assignments only with warnings. The same applies to MAXIMAL and
 * String below. */
typedef struct
{
  int BLOCKI;
  int BLOCKJ;
  int maxi;
  int maxj;
  struct MaxElement* next;
}MaxElement,*PMaxElement;

/* Per-block list of maximal DP values and their positions. */
typedef struct
{
  int max;
  int maxi;
  int maxj;
  struct MAXIMAL* next;
}MAXIMAL,*PMAX;

/* A DP tile: its last column/row (as seen by neighbours) and its maxima. */
typedef struct
{
  //int col[T];
  //int row[T];
  int* col;
  int* row;
  PMAX el_max;
}BLOCK,*PBLOCK;

/* Singly linked list of characters (used to build the aligned strings). */
typedef struct
{
  char inf;
  struct String* next;
}String,*PString;

/* Prepend character x to list L; returns the new head. */
PString Cons( char x, PString L)
{
  PString T1;
  T1 = ( PString )malloc( sizeof(String));
  T1->inf = x;
  T1->next = L;
  return T1;
}

/* Prepend a maximum record (value x at position i,j) to list L. */
PMAX Cons_el( int x,int i, int j, PMAX L)
{
  PMAX T1;
  T1 = ( PMAX )malloc( sizeof(MAXIMAL));
  T1->max = x;
  T1->maxi = i;
  T1->maxj = j;
  T1->next = L;
  return T1;
}

/* Maximum of two ints. */
int max_from_two( int x, int y)
{
  return ((x>=y)?x:y);
}

/* Minimum of two ints. */
int min_from_two( int x, int y)
{
  return ((x<=y)?x:y);
}

/* Size of an open file in bytes; rewinds the stream afterwards. */
long size_of_file( FILE* fileS1)
{
  long size_file;
  fseek(fileS1, 0, SEEK_END);  /* move the file pointer to the end of the file */
  size_file = ftell(fileS1);   /* read the current position */
  fseek(fileS1, 0, SEEK_SET);  /* rewind to the beginning */
  return size_file;
}

/* Read all alphabetic characters from file_string into String (upper-cased);
 * returns the number of characters stored.
 * NOTE(review): the !feof-before-fgetc idiom processes the EOF return of
 * fgetc once before the flag is set; harmless here only because EOF is not
 * alphabetic — still worth rewriting as `while ((b = fgetc(...)) != EOF)`. */
int read_file( FILE* file_string, char* String)
{
  char b;
  int i;
  i = 0;
  while(!feof(file_string)){
    b = fgetc(file_string);
    if (isalpha(b))
    {
      String[i] = toupper(b);
      i++;
    }
  }
  return i;
}

/* Write the aligned string list Str1 to file1 (prefixed by '*', suffixed by
 * its begin/end positions { x y }), freeing the list nodes as it goes. */
FILE* write_to_file( FILE* file1, PString Str1,int x, int y)
{
  PString Str;
  fprintf(file1," %c",'*');
  while( Str1 ){
    // printf(" %c",Str1->inf);
    fprintf(file1,"%c",Str1->inf);
    Str = Str1;
    Str1 = Str1->next;
    free(Str);
  }
  fprintf(file1," { %i %i }",x,y);
  fprintf(file1,"%c",'\n');
  //printf("\n");
  return file1;
}

/* Scan all blocks for the global maximum DP value, store it in *p, and build
 * a list (onto B) of every position attaining it, converted to 0-based
 * in-block coordinates. Consumes (advances) each block's el_max list.
 * NOTE(review): returns `m`, the last node allocated — which equals the list
 * head B only because B is reassigned on every iteration; `m` is
 * uninitialized if no block matches (cannot happen after the first pass,
 * but fragile). maximalI/maximalJ are computed and never used. */
PMaxElement maxel ( BLOCK **mas, PMaxElement B,int* p )
{
  int i,j,maximal,maximalI,maximalJ;
  PMaxElement m;
  maximal = 0;
  maximalI = 0;
  maximalJ = 0;
  for ( i = 0; i < Kol_block_v_stolbce; i++ )
    for ( j = 0; j < Kol_block_v_stroke; j++ )
      if ( mas[i][j].el_max->max >= maximal ){
        maximal = mas[i][j].el_max->max;
        maximalI = mas[i][j].el_max->maxi;
        maximalJ = mas[i][j].el_max->maxj;
      }
  printf(" TT %i ",maximal);
  for ( i = 0; i < Kol_block_v_stolbce; i++)
    for ( j = 0; j < Kol_block_v_stroke; j++ )
      if ( mas[i][j].el_max->max == maximal ){
        while ( mas[i][j].el_max )
        {
          m = ( PMaxElement )malloc( sizeof ( MaxElement ));
          m -> BLOCKI = i;
          m -> BLOCKJ = j;
          m -> maxi = mas[i][j].el_max->maxi-1;
          m -> maxj = mas[i][j].el_max->maxj-1;
          m -> next = B;
          B = m;
          mas[i][j].el_max = mas[i][j].el_max->next;
        }
      }
  *p = maximal;
  return m;
}

/* Substitution score for a mismatched pair (S1, S2) looked up in the
 * lower-triangular BLOSUM table; 'N' scores penalty/2 against anything.
 * NOTE(review): `difference` is returned uninitialized if neither character
 * is in {A,G,C,T,-,N}; callers only pass filtered alphabet characters. */
int differences(char S1, char S2, int penalty, int** BLOSUM)
{
  int difference;
  if(( S2 == 'N')||(S1 == 'N')) difference = penalty/2;
  if( S2 == 'A')
  {
    if ( S1 == 'G') difference = BLOSUM [1][0];
    if ( S1 == 'C') difference = BLOSUM [2][0];
    if ( S1 == 'T') difference = BLOSUM [3][0];
    if ( S1 == '-') difference = BLOSUM [4][0];
  }
  if( S2 == 'G'){
    if ( S1 == 'A') difference = BLOSUM [1][0];
    if ( S1 == 'C') difference = BLOSUM [2][1];
    if ( S1 == 'T') difference = BLOSUM [3][1];
    if ( S1 == '-') difference = BLOSUM [4][1];
  }
  if ( S2 == 'C')
  {
    if ( S1 == 'A') difference = BLOSUM [2][0];
    if ( S1 == 'G') difference = BLOSUM [2][1];
    if ( S1 == 'T') difference = BLOSUM [3][2];
    if ( S1 == '-') difference = BLOSUM [4][2];
  }
  if ( S2 == 'T'){
    if ( S1 == 'A') difference = BLOSUM [3][0];
    if ( S1 == 'G') difference = BLOSUM [3][1];
    if ( S1 == 'C') difference = BLOSUM [3][2];
    if ( S1 == '-') difference = BLOSUM [4][3];
  }
  if( S2 == '-')
  {
    if ( S1 == 'G') difference = BLOSUM [4][1];
    if ( S1 == 'C') difference = BLOSUM [4][2];
    if ( S1 == 'T') difference = BLOSUM [4][3];
    if ( S1 == 'A') difference = BLOSUM [4][0];
  }
  return difference;
}

/* Match score for character S2 (BLOSUM diagonal).
 * NOTE(review): uninitialized return if S2 is outside {A,G,C,T,-}. */
int similarities(char S2,int** BLOSUM)
{
  int similarity;
  if ( S2 == 'A') similarity = BLOSUM [0][0];
  if ( S2 == 'G') similarity = BLOSUM [1][1];
  if ( S2 == 'C') similarity = BLOSUM [2][2];
  if ( S2 == 'T') similarity = BLOSUM [3][3];
  if ( S2 == '-') similarity = BLOSUM [4][4];
  return similarity;
}

/* Compute DP block (M_bl, N_bl).
 *   rows/cols : last row of the block above / last column of the block left
 *   diag      : DP value of the corner cell above-left of this block
 *   A1        : output — the block's row/col borders and list of maxima
 * Edge blocks are shorter when M or N is not a multiple of T.
 * NOTE(review): the scratch matrix h has Size_colomn+1 rows but the free
 * loop below releases only Size_colomn of them — one row leaks per call.
 * Also A1->row/col are filled only when the block is full-size (i==T/j==T). */
void blocks( int *rows, int *cols,int diag,int M_bl, int N_bl, PBLOCK A1,int penalty, int** BLOSUM)
{
  int **h;
  int wp1,wp2;
  int i,j,Size_colomn,Size_row;
  int w,max,maximum;
  /* trim the tile at the right/bottom edges of the DP matrix */
  if ( N % T != 0 && N_bl == Kol_block_v_stroke -1 ) Size_row = N % T;
  else Size_row = T;
  if ( M % T != 0 && M_bl == Kol_block_v_stolbce - 1 ) Size_colomn = M % T;
  else Size_colomn = T;
  h = (int**)malloc(( Size_colomn + 1 )*sizeof(int*));
  for ( i = 0; i < Size_colomn+1;i ++)
    h[i] = (int*)malloc(( Size_row + 1 )*sizeof(int));
  /* seed row 0 / column 0 of the tile from the neighbouring blocks */
  h[0][0] = diag;
  for ( i = 1; i <= Size_colomn; i++) h[i][0] = cols[i-1];
  for ( j = 1; j <= Size_row; j++) h[0][j] = rows[j-1];
  maximum = 0;
  A1->row = (int*)calloc(T,sizeof(int));
  A1->col = (int*)calloc(T,sizeof(int));
  for ( i = 1; i <= Size_colomn; i++)
    for ( j = 1; j <= Size_row; j++)
    {
      max = 0;
      /* w = substitution/match score for the current character pair,
       * taken from the lower-triangular BLOSUM table */
      if( String1[i-1 + M_bl*T]==String2[j-1+ N_bl*T])
      {
        switch (String2[j-1+ N_bl*T]) {
          case 'A': w = BLOSUM [0][0]; break;
          case 'G': w = BLOSUM [1][1]; break;
          case 'C': w = BLOSUM [2][2]; break;
          case 'T': w = BLOSUM [3][3]; break;
          case '-': w = BLOSUM [4][4]; break;
        }
      }
      else
      {
        if(( String2[j-1+ N_bl*T] == 'N')||(String1[i-1 + M_bl*T] == 'N'))
          w = penalty/2;
        if( String2[j-1+ N_bl*T] == 'A')
        {
          switch (String1[i-1 + M_bl*T]) {
            case 'G': w = BLOSUM [1][0]; break;
            case 'C': w = BLOSUM [2][0]; break;
            case 'T': w = BLOSUM [3][0]; break;
            case '-': w = BLOSUM [4][0]; break;
          }
        }
        if( String2[j-1+ N_bl*T] == 'G'){
          switch (String1[i-1 + M_bl*T]) {
            case 'A': w = BLOSUM [1][0]; break;
            case 'C': w = BLOSUM [2][1]; break;
            case 'T': w = BLOSUM [3][1]; break;
            case '-': w = BLOSUM [4][1]; break;
          }
        }
        if ( String2[j-1+ N_bl*T] == 'C')
        {
          switch (String1[i-1 + M_bl*T]) {
            case 'A': w = BLOSUM [2][0]; break;
            case 'G': w = BLOSUM [2][1]; break;
            case 'T': w = BLOSUM [3][2]; break;
            case '-': w = BLOSUM [4][2]; break;
          }
        }
        if ( String2[j-1+ N_bl*T] == 'T'){
          switch (String1[i-1 + M_bl*T]) {
            case 'A': w = BLOSUM [3][0]; break;
            case 'G': w = BLOSUM [3][1]; break;
            case 'C': w = BLOSUM [3][2]; break;
            case '-': w = BLOSUM [4][3]; break;
          }
        }
        if( String2[j-1+ N_bl*T] == '-')
        {
          switch (String1[i-1 + M_bl*T]) {
            case 'G': w = BLOSUM [4][1]; break;
            case 'C': w = BLOSUM [4][2]; break;
            case 'T': w = BLOSUM [4][3]; break;
            case 'A': w = BLOSUM [4][0]; break;
          }
        }
      }
      /* wp1 / wp2 = gap scores (character against '-') */
      if (String1[i-1+ M_bl*T] != '-')
        switch (String1[i-1+ M_bl*T]) {
          case 'G': wp1 = BLOSUM [4][1]; break;
          case 'C': wp1 = BLOSUM [4][2]; break;
          case 'T': wp1 = BLOSUM [4][3]; break;
          case 'A': wp1 = BLOSUM [4][0]; break;
        }
      else wp1 = BLOSUM [4][4];
      if (String2[j-1+ N_bl*T] != '-')
        switch (String2[j-1+ N_bl*T]) {
          case 'G': wp2 = BLOSUM [4][1]; break;
          case 'C': wp2 = BLOSUM [4][2]; break;
          case 'T': wp2 = BLOSUM [4][3]; break;
          case 'A': wp2 = BLOSUM [4][0]; break;
        }
      else wp2 = BLOSUM [4][4];
      /*
      if( String1[i-1 + M_bl*T]==String2[j-1+ N_bl*T]) w = match;
      else w = mismatch;
      if (String1[i-1+ M_bl*T] != '_') wp1 = mismatch;
      else wp1 = match;
      if (String2[j-1+ N_bl*T] != '_') wp2 = mismatch;
      else wp2 = match;
      */
      /* local-alignment recurrence: max of 0, diagonal, left, up */
      if (( h[i-1][ j-1] + w )> max ) max = h[i-1][ j-1] + w ;
      if ( ( h[i][ j-1] + wp2 )> max ) max = h[i][ j-1] + wp2 ;
      if ( ( h[i-1][ j] + wp1 )> max ) max = h[i-1][ j] + wp1 ;
      h[i][j] = max;
      if ( h[i][j] >= maximum ) maximum = h[i][j];
      /* export the tile borders for the neighbouring blocks */
      if ( i == T ) A1->row[j-1] = h[i][j];
      if ( j == T ) A1->col[i-1] = h[i][j];
    }
  /* record every cell attaining the block maximum */
  A1->el_max = NULL;
  for ( i = 1; i <= Size_colomn; i++)
    for ( j = 1; j <= Size_row; j++)
      if ( h[i][j] == maximum )
        A1->el_max = Cons_el(h[i][j],i,j,A1->el_max);
  for ( j=0; j<Size_colomn;j++) free (h[j]);
  free (h);
}

/* Free the border arrays of all blocks strictly below/right of (M_bl, N_bl)
 * within the rectangle bounded by (i2, j2). (Currently unused — all calls
 * in main are commented out.) */
void clear (int M_bl, int N_bl,int i2, int j2, BLOCK** A1)
{
  int i,j;
  for ( i = M_bl+1; i <= i2; i++)
    for ( j = 0 ; j <= j2; j++)
    {
      free(A1[i][j].col);
      free(A1[i][j].row);
    }
  for ( j = N_bl+1; j <= j2; j++)
    for ( i = 0; i <= M_bl; i++)
    {
      free(A1[i][j].col);
      free(A1[i][j].row);
    }
}

/* Recompute DP block (M_bl, N_bl) exactly as blocks() does, but record for
 * each cell which predecessor won (P[i][j]: 2 = diagonal, 1 = left,
 * 3 = up, 0 = none/restart) so the traceback in main can follow it. */
unsigned char** recollect( int *rows, int *cols,int diag,int M_bl, int N_bl,unsigned char **P,int penalty, int** BLOSUM)
{
  int **h;
  unsigned char previous;
  int wp1,wp2;
  int i,j,Size_colomn,Size_row;
  int w,max;
  if ( N % T != 0 && N_bl == Kol_block_v_stroke -1 ) Size_row = N % T;
  else Size_row = T;
  if ( M % T != 0 && M_bl == Kol_block_v_stolbce - 1 ) Size_colomn = M % T;
  else Size_colomn = T;
  h = (int**)malloc(( Size_colomn + 1 )*sizeof(int*));
  for ( i = 0; i < Size_colomn+1;i ++)
    h[i] = (int*)malloc(( Size_row + 1 )*sizeof(int));
  h[0][0] = diag;
  for ( i = 1; i <= Size_colomn; i++) h[i][0] = cols[i-1];
  for ( j = 1; j <= Size_row; j++) h[0][j] = rows[j-1];
  for ( i = 1; i <= Size_colomn; i++)
    for ( j = 1; j <= Size_row; j++)
    {
      max = 0;
      previous = 0;
      /*
      if( String1[i-1 + M_bl*T]==String2[j-1+ N_bl*T]) w = match;
      else w = mismatch;
      if (String1[i-1+ M_bl*T] != '_') wp1 = mismatch;
      else wp1 = match;
      if (String2[j-1+ N_bl*T] != '_') wp2 = mismatch;
      else wp2 = match;
      */
      if( String1[i-1 + M_bl*T]==String2[j-1+ N_bl*T])
        w = similarities(String2[j-1+ N_bl*T],BLOSUM);
      else
        w = differences(String1[i-1 + M_bl*T],String2[j-1+ N_bl*T],penalty,BLOSUM);
      if (String1[i-1+ M_bl*T] != '-')
        wp1 = differences(String1[i-1 + M_bl*T],'-',penalty,BLOSUM);
      else wp1 = similarities('-',BLOSUM);
      if (String2[j-1+ N_bl*T] != '-')
        wp2 = differences('-',String2[j-1+ N_bl*T],penalty,BLOSUM);
      else wp2 = similarities('-',BLOSUM);
      if (( h[i-1][ j-1] + w )> max )
      {
        max = h[i-1][ j-1] + w ;
        previous = 2;
      }
      if ( ( h[i][ j-1] + wp2 )> max ){
        max = h[i][ j-1] + wp2 ;
        previous = 1;
      }
      if ( ( h[i-1][ j] + wp1 )> max )
      {
        max = h[i-1][ j] + wp1 ;
        previous = 3;
      }
      h[i][j] = max;
      P[i-1][j-1] = previous;
    }
  /* NOTE(review): same one-row leak as in blocks() — Size_colomn+1 rows
   * allocated, Size_colomn freed. */
  for ( j=0; j<Size_colomn;j++) free (h[j]);
  free (h);
  return P;
}

/* Driver: read the two sequences and the BLOSUM table, compute all DP blocks
 * in wavefront (anti-diagonal) order, then trace back every global maximum
 * and write the aligned strings to newS1.txt / newS2.txt. */
int main()
{
  FILE * fileS1,*fileS2;
  long size_file;
  int c;
  int i,j,i1,j1,i2,j2,i3,j3;
  int **BLOSUM;
  int *row1;
  unsigned char **Prev;
  int space,dif,sim,score ;
  int *p;
  int penalty ;       // gap penalty
  int endI,endJ;      // positions in the source strings where the alignment ended
  int beginI,beginJ;  // positions in the strings where the optimal alignment begins
  int difference, similarity;
  PString Str1,Str2;
  BLOCK ** mat;
  PMaxElement L;
  time_t time1,time2;
  time1 = clock();
  /* load both sequences (alphabetic characters only, upper-cased)
   * NOTE(review): fopen results are not checked; %i is used to print the
   * long lengths M and N. */
  fileS1=fopen("string1.txt","rb");
  size_file = size_of_file(fileS1);
  String1 = (char*)malloc( size_file * sizeof(char));
  M = read_file(fileS1,String1);
  fclose(fileS1);
  fileS2=fopen("string2.txt","rb");
  size_file = size_of_file(fileS2);
  String2 = (char*)malloc( size_file * sizeof(char));
  N = read_file(fileS2,String2);
  fclose(fileS2);
  printf("\n Length of First string is %i\n",M);
  printf("\n Length of Second string is %i\n",N);
  // allocate the 5x5 BLOSUM scoring matrix
  BLOSUM = (int**)malloc( 5 * sizeof(int*));
  for ( i = 0; i < 5;i ++)
    BLOSUM[i] = (int*)malloc( 5 * sizeof(int));
  // read the BLOSUM matrix from BLOSUM.txt
  // (the file must have a space after every number)
  fileS1 = fopen("BLOSUM.txt","r");
  fscanf(fileS1," %i",&penalty);
  for(i = 0; i < 5; i++)
    for(j = 0; j < 5; j++){
      fscanf(fileS1," %i",&c);
      while((c!=' ')&&(!feof(fileS1))){
        BLOSUM[i][j] = c;
        c = fgetc(fileS1);
      }
    }
  fclose(fileS1);
  /* number of T-sized blocks per row / column (rounding up) */
  if ( N%T == 0) Kol_block_v_stroke = N/T;
  else Kol_block_v_stroke = N/T + 1;
  if ( M%T == 0) Kol_block_v_stolbce = M/T;
  else Kol_block_v_stolbce = M/T + 1;
  mat = ( BLOCK ** )malloc( Kol_block_v_stolbce * sizeof(BLOCK *));//M_block
  for ( i = 0; i < Kol_block_v_stolbce ;i ++)
    mat[i] = ( BLOCK *)malloc( Kol_block_v_stroke * sizeof(BLOCK));//N_block
  /* row1 = all-zero border used by the first block row/column */
  row1 = (int*)malloc( T * sizeof(int));
  for ( i = 0; i < T; i++){
    row1[i] = 0;
  }
  //for ( i = 0; i < Kol_block_v_stolbce;i++)
  //for ( j = 0; j < Kol_block_v_stroke; j++)
  /* wavefront sweep: blocks on anti-diagonal j are mutually independent,
   * so the inner loop is parallelized; each block takes the last row of the
   * block above, the last column of the block to the left, and the corner
   * value of the diagonal neighbour */
  for (j = 0; j <= Kol_block_v_stolbce + Kol_block_v_stroke - 2; j = j + 1)
  {
#pragma omp parallel for
    for (i = max_from_two(0, j - Kol_block_v_stroke + 1); i <= min_from_two(Kol_block_v_stolbce-1, j); i = i + 1)
      if ( i == 0)
        if ( j-i == 0) blocks(row1,row1,0,i,j-i,&mat[i][j-i],penalty,BLOSUM);
        else blocks(row1,mat[i][j-i-1].col,0,i,j-i,&mat[i][j-i],penalty,BLOSUM);
      else
        if ( j-i == 0) blocks(mat[i-1][j-i].row,row1,0,i,j-i,&mat[i][j-i],penalty,BLOSUM);
        else blocks(mat[i-1][j-i].row,mat[i][j-i-1].col,mat[i-1][j-i-1].col[T-1],i,j-i,&mat[i][j-i],penalty,BLOSUM);
  }
  /* (dead code: earlier hand-specialized wavefront scheduling variants for
   * the wide/tall matrix cases were commented out here; they are superseded
   * by the unified diagonal loop above) */
  /* collect all global maxima and trace each one back */
  L = NULL;
  p = &score;
  L = maxel(mat,L,p);
  printf("SCORE %i ",score);
  printf("Alignment\n");
  fileS1 = fopen("newS1.txt","w");
  fileS2 = fopen("newS2.txt","w");
  /* Prev holds the predecessor directions of the block currently being
   * traced (recomputed per block by recollect) */
  Prev = (unsigned char**)calloc(T,sizeof(unsigned char*));
  for ( i = 0; i < T;i ++)
    Prev[i] = (unsigned char*)calloc(T,sizeof(unsigned char));
  while( L ){
    space = 0;
    dif = 0;
    sim = 0;
    if ( L ){
      Str1 = NULL;
      Str2 = NULL;
      /* absolute DP coordinates of this maximum */
      i = L->maxi + T * L->BLOCKI;
      j = L->maxj + T * L->BLOCKJ;
      printf(" %i %i ",i,j);
      i1 = L->BLOCKI;
      j1 = L->BLOCKJ;
      /* recompute the block containing the maximum to get its directions */
      if ( L->BLOCKI == 0)
        if ( L->BLOCKJ == 0)
        {
          //clear(i1,j1,Kol_block_v_stolbce-1,Kol_block_v_stroke-1,mat);
          Prev = recollect(row1,row1,0,i1,j1,Prev,penalty,BLOSUM);
        }
        else
        {
          //clear(i1,j1,Kol_block_v_stolbce-1,Kol_block_v_stroke-1,mat);
          Prev = recollect(row1,mat[i1][j1-1].col,0,i1,j1,Prev,penalty,BLOSUM);
        }
      else
        if ( L->BLOCKJ == 0)
        {
          //clear(i1,j1,Kol_block_v_stolbce-1,Kol_block_v_stroke-1,mat);
          Prev = recollect(mat[i1-1][j1].row,row1,0,i1,j1,Prev,penalty,BLOSUM);
        }
        else
        {
          //clear(i1,j1,Kol_block_v_stolbce-1,Kol_block_v_stroke-1,mat);
          Prev = recollect(mat[i1-1][j1].row,mat[i1][j1-1].col,mat[i1-1][j1-1].col[T-1],i1,j1,Prev,penalty,BLOSUM);
        }
      endI = i+1;
      endJ = j+1;
      i2 = L->maxi;
      j2 = L->maxj;
      printf("Alignment\n");
      /* walk the predecessor chain, building the two aligned strings;
       * when the walk crosses a block boundary (i2/j2 hits -1) the
       * containing block is recomputed with recollect */
      while( Prev[i2][j2] != 0 && i2 != -1 && j2 != -1 && i>=0 && j>=0){
        beginI = i+1;
        beginJ = j+1;
        if(i2>=0 && j2>=0 && Prev[i2][j2] == 3){  /* up: gap in String2 */
          Str1 = Cons('-',Str1);
          Str2 = Cons(String1[i],Str2);
          space++;
          i = i - 1;
          i2 = i2 - 1;
        }
        if(i2>=0 && j2>=0 && Prev[i2][j2] == 2 ){  /* diagonal: (mis)match */
          Str1 = Cons(String2[j],Str1);
          Str2 = Cons(String1[i],Str2);
          i = i - 1;
          j = j - 1;
          i2 = i2 - 1;
          j2 = j2 - 1;
          if (Str1->inf == Str2->inf) sim++;
          else dif++;
        }
        if (i2>=0 && j2>=0 && Prev[i2][j2] == 1)  /* left: gap in String1 */
        {
          Str2 = Cons('-',Str2);
          Str1 = Cons(String2[j],Str1);
          space++;
          j = j - 1;
          j2 = j2 -1;
        }
        if (i2 == -1 || j2 == -1){  /* crossed into a neighbouring block */
          i3 = i1;
          j3 = j1;
          i1 = i/T;
          j1 = j/T;
          if ( i>=0 && j>=0 ){
            if ( i1 == 0)
              if ( j1 == 0)
              {
                //clear(i1,j1,i3,j3,mat);
                Prev = recollect(row1,row1,0,i1,j1,Prev,penalty,BLOSUM);
              }
              else
              {
                //clear(i1,j1,i3,j3,mat);
                Prev = recollect(row1,mat[i1][j1-1].col,0,i1,j1,Prev,penalty,BLOSUM);
              }
            else
              if ( j1 == 0)
              {
                //clear(i1,j1,i3,j3,mat);
                Prev = recollect(mat[i1-1][j1].row,row1,0,i1,j1,Prev,penalty,BLOSUM);
              }
              else
              {
                //clear(i1,j1,i3,j3,mat);
                Prev = recollect(mat[i1-1][j1].row,mat[i1][j1-1].col,mat[i1-1][j1-1].col[T-1],i1,j1,Prev,penalty,BLOSUM);
              }
          }
        }
        if ( i2 == -1 ) i2 = T-1;
        if ( j2 == -1 ) j2 = T-1;
      }
      // write the aligned string S1 to newS1.txt; each new aligned string
      // starts on a new line in the file, prefixed with '*'
      printf("\n First string : \n");
      fileS1 = write_to_file(fileS1, Str2,beginI,endI);
      // write the aligned string S2 to newS2.txt; each new aligned string
      // starts on a new line in the file, prefixed with '*'
      printf("\n Second string : \n");
      fileS2 = write_to_file(fileS2,Str1,beginJ,endJ);
      printf("\n Score = %i ",score);
      printf(" \n Simularities = %i", sim);
      printf("\n Differences = %i ",dif);
      printf(" \n Spaces = %i \n", space);
      printf(" Position of alignment at First string %i %i",beginI,endI);
      printf(" \n Position of alignment at Second string %i %i \n",beginJ,endJ);
      L = L ->next;
    }
  }
  time2 = clock();
  printf("\n Time = %i ", ( time2-time1 ));
  fclose(fileS1);
  fclose(fileS2);
  /* NOTE(review): this loop frees only BLOSUM[0..3]; BLOSUM[4] leaks. */
  for ( i=0; i< 4;i++) free (BLOSUM[i]);
  free (BLOSUM);
  for ( j=0; j<T;j++) free (Prev[j]);
  free (Prev);
  for ( j=0; j< Kol_block_v_stolbce;j++) free (mat[j]);
  free (mat);
  system("pause");
  return 0;
}
3d7pt_var.lbpar.c
/*
 * PLUTO-tiled (diamond/time-skewed) variant of 3d7pt_var.c. The stencil
 * loop nest below between the CLooG markers is machine-generated
 * polyhedral code — do not hand-edit it.
 */
#include <omp.h>
#include <math.h>
/* ceiling/floor of an integer division, used by the generated loop bounds */
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified (carry normalization), as in the classic glibc example.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/*
 * Usage: prog Nx Ny Nz Nt
 * Runs the tiled 7-point variable-coefficient stencil TESTS times and
 * reports per-run times.
 * NOTE(review): Nx/Ny/Nz are read uninitialized when argc <= 3 and Nt when
 * argc <= 4 — all four arguments are effectively mandatory; confirm and add
 * a usage check.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;   /* +2: one ghost layer on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds two time levels, coef the 7 stencil weights
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // (tile sizes 4/4/32/512 match the strides visible in the generated
  //  loop bounds below: t2 steps in 4s, t3 in 32s, t4 in 512s)
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 32;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  // NOTE(review): only A[0] is initialized, starting at index 1 — the
  // index-0 halo planes and all of A[1] are read uninitialized by the
  // stencil; confirm whether this matters for the timing-only use here.
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* (verbatim glibc <stdc-predef.h> header comment, inlined here by the
     * source-to-source code generator during preprocessing:)
     *
     * Copyright (C) 1991-2014 Free Software Foundation, Inc.
     * This file is part of the GNU C Library.
     *
     * The GNU C Library is free software; you can redistribute it and/or
     * modify it under the terms of the GNU Lesser General Public License as
     * published by the Free Software Foundation; either version 2.1 of the
     * License, or (at your option) any later version.
     *
     * The GNU C Library is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     * Lesser General Public License for more details.
     *
     * You should have received a copy of the GNU Lesser General Public
     * License along with the GNU C Library; if not, see
     * <http://www.gnu.org/licenses/>.
     *
     * This header is separate from features.h so that the compiler can
     * include it implicitly at the start of every compilation.  It must not
     * itself include <features.h> or any other header that includes
     * <features.h> because the implicit include comes before any feature
     * test macros that may be defined in a source file before it first
     * explicitly includes a system header.  GCC knows the name of this
     * header in order to preinclude it.
     *
     * glibc's intent is to support the IEC 559 math functionality, real and
     * complex.  If the GCC (4.9 and later) predefined macros specifying
     * compiler intent are available, use them to determine whether the
     * overall intent is to support these features; otherwise, presume an
     * older compiler has intent to support these features and define these
     * macros by default.
     *
     * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
     * Unicode 6.0.
     *
     * We do not support C11 <threads.h>.
     */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Generated tiled/time-skewed loop nest: t1 iterates over time-tile
     * wavefronts; t2 (z), t3 (y), t4 (x) are tile indices — the t2 loop is
     * the parallel dimension; t5 is the time step and t6/t7/t8 are the
     * spatial points, skewed by t5 (hence the (-t5+t6) etc. subscripts). */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32));t3<=min(min(min(floord(4*t2+Ny,32),floord(Nt+Ny-4,32)),floord(2*t1+Ny+1,32)),floord(4*t1-4*t2+Nz+Ny-1,32));t3++) {
            for (t4=max(max(max(0,ceild(t1-255,256)),ceild(4*t2-Nz-508,512)),ceild(32*t3-Ny-508,512));t4<=min(min(min(min(floord(4*t2+Nx,512),floord(Nt+Nx-4,512)),floord(2*t1+Nx+1,512)),floord(32*t3+Nx+28,512)),floord(4*t1-4*t2+Nz+Nx-1,512));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),32*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),32*t3+30),512*t4+510),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(512*t4,t5+1);
                    ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* same 7-point update as the untiled version, on
                       * de-skewed coordinates (z,y,x) = (-t5+t6,-t5+t7,-t5+t8) */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* lowercase min() is the macro defined at the top of this file */
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // NOTE(review): the top-level A, coef and tile_size pointers are leaked.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
ttables.h
// Copyright 2013 by Chris Dyer // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef _TTABLES_H_ #define _TTABLES_H_ #include <cassert> #include <cmath> #include <fstream> #include <iostream> #include <vector> #include "src/hashtables.h" #include "src/corpus.h" struct Md { static double digamma(double x) { double result = 0, xx, xx2, xx4; for ( ; x < 7; ++x) result -= 1/x; x -= 1.0/2.0; xx = 1.0/x; xx2 = xx*xx; xx4 = xx2*xx2; result += log(x)+(1./24.)*xx2-(7.0/960.0)*xx4+(31.0/8064.0)*xx4*xx2-(127.0/30720.0)*xx4*xx4; return result; } static inline double log_poisson(unsigned x, const double& lambda) { assert(lambda > 0.0); return std::log(lambda) * x - lgamma(x + 1) - lambda; } }; class TTable { public: TTable() : frozen_(false), probs_initialized_(false) {} // typedef std::unordered_map<unsigned, double> Word2Double; typedef std::vector<Word2Double> Word2Word2Double; inline double prob(const unsigned e, const unsigned f) const { return probs_initialized_ ? 
ttable[e].find(f)->second : 1e-9; } inline double safe_prob(const int& e, const int& f) const { if (e < static_cast<int>(ttable.size())) { const Word2Double& cpd = ttable[e]; const Word2Double::const_iterator it = cpd.find(f); if (it == cpd.end()) return 1e-9; return it->second; } else { return 1e-9; } } inline void SetMaxE(const unsigned e) { // NOT thread safe if (e >= counts.size()) counts.resize(e + 1); } inline void Insert(const unsigned e, const unsigned f) { // NOT thread safe if (e >= counts.size()) counts.resize(e + 1); counts[e][f] = 0; } inline void Increment(const unsigned e, const unsigned f, const double x) { counts[e].find(f)->second += x; // Ignore race conditions here. } void NormalizeVB(const double alpha) { ttable.swap(counts); #pragma omp parallel for schedule(dynamic) for (unsigned i = 0; i < ttable.size(); ++i) { double tot = 0; Word2Double& cpd = ttable[i]; for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) tot += it->second + alpha; if (!tot) tot = 1; const double digamma_tot = Md::digamma(tot); for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) it->second = exp(Md::digamma(it->second + alpha) - digamma_tot); } ClearCounts(); probs_initialized_ = true; } void Normalize() { ttable.swap(counts); #pragma omp parallel for schedule(dynamic) for (unsigned i = 0; i < ttable.size(); ++i) { double tot = 0; Word2Double& cpd = ttable[i]; for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) tot += it->second; if (!tot) tot = 1; for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) it->second /= tot; } ClearCounts(); probs_initialized_ = true; } void Freeze() { // duplicate all values in counts into ttable // later updates to both are semi-threadsafe assert (!frozen_); if (!frozen_) { ttable.resize(counts.size()); for (unsigned i = 0; i < counts.size(); ++i) { ttable[i] = counts[i]; } } frozen_ = true; } // adds counts from another TTable - probabilities remain unchanged TTable& 
operator+=(const TTable& rhs) { if (rhs.counts.size() > counts.size()) counts.resize(rhs.counts.size()); for (unsigned i = 0; i < rhs.counts.size(); ++i) { const Word2Double& cpd = rhs.counts[i]; Word2Double& tgt = counts[i]; for (Word2Double::const_iterator j = cpd.begin(); j != cpd.end(); ++j) { tgt[j->first] += j->second; } } return *this; } void ExportToFile(const char* filename, Dict& d, double BEAM_THRESHOLD) const { std::ofstream file(filename); for (unsigned i = 0; i < ttable.size(); ++i) { const std::string& a = d.Convert(i); const Word2Double& cpd = ttable[i]; double max_p = -1; for (auto& it : cpd) if (it.second > max_p) max_p = it.second; const double threshold = - log(max_p) * BEAM_THRESHOLD; for (auto& it : cpd) { const std::string& b = d.Convert(it.first); double c = log(it.second); if (c >= threshold) file << a << '\t' << b << '\t' << it.second << std::endl; } } file.close(); } void EntropyExportToFile(const char* filename, Dict& d, double BEAM_THRESHOLD) const { std::ofstream file(filename); for (unsigned i = 0; i < ttable.size(); ++i) { const std::string& a = d.Convert(i); const Word2Double& cpd = ttable[i]; double h = 0; for (auto& it : cpd) { double c = log(it.second); h += it.second * c; } file << a << '\t' << -h << std::endl; } file.close(); } private: void ClearCounts() { #pragma omp parallel for schedule(dynamic) for (size_t i=0; i<counts.size();++i) { for (auto& cnt : counts[i]) { cnt.second = 0.0; } } } Word2Word2Double ttable; Word2Word2Double counts; bool frozen_; // Disallow new e,f pairs to be added to counts bool probs_initialized_; // If we can use the values in probs public: void DeserializeLogProbsFromText(std::istream* in, Dict& d); }; #endif
demos.h
//------------------------------------------------------------------------------
// GraphBLAS/Demo/Include/demos.h: include file for all demo programs
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Declarations shared by all GraphBLAS demo programs: BFS, maximal
// independent set, page rank, matrix generators/readers, and the
// CHECK/OK error-handling macros.

#ifndef GRAPHBLAS_DEMOS_H
#define GRAPHBLAS_DEMOS_H

#include "GraphBLAS.h"
#include "simple_rand.h"
#include "simple_timer.h"
#include "usercomplex.h"

// When compiled as a MATLAB MEX file, route all allocation through the
// MATLAB memory manager.
#ifdef MATLAB_MEX_FILE
#include "mex.h"
#include "matrix.h"
#define malloc  mxMalloc
#define free    mxFree
#define calloc  mxCalloc
#define realloc mxRealloc
#endif

//------------------------------------------------------------------------------
// manage compiler warnings
//------------------------------------------------------------------------------

#if defined __INTEL_COMPILER
#pragma warning (disable: 58 167 144 177 181 186 188 589 593 869 981 1418 1419 1572 1599 2259 2282 2557 2547 3280 )
#elif defined __GNUC__
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Wformat-truncation="
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-result"
#pragma GCC diagnostic ignored "-Wint-in-bool-context"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wsign-compare"
#pragma GCC diagnostic ignored "-Wtype-limits"
#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
// enable these warnings as errors
#pragma GCC diagnostic error "-Wmisleading-indentation"
#pragma GCC diagnostic error "-Wswitch-default"
#endif

#undef MIN
#undef MAX
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX(a,b) (((a) > (b)) ? (a) : (b))

GrB_Info bfs5m              // BFS of a graph (using vector assign & reduce)
(
    GrB_Vector *v_output,   // v [i] is the BFS level of node i in the graph
    GrB_Matrix A,           // input graph, treated as if boolean in semiring
    GrB_Index s             // starting node of the BFS
) ;

GrB_Info bfs5m_check        // BFS of a graph (using vector assign & reduce)
(
    GrB_Vector *v_output,   // v [i] is the BFS level of node i in the graph
    GrB_Matrix A,           // input graph, treated as if boolean in semiring
    GrB_Index s             // starting node of the BFS
) ;

GrB_Info bfs6               // BFS of a graph (using apply)
(
    GrB_Vector *v_output,   // v [i] is the BFS level of node i in the graph
    const GrB_Matrix A,     // input graph, treated as if boolean in semiring
    GrB_Index s             // starting node of the BFS
) ;

GrB_Info bfs6_check         // BFS of a graph (using apply)
(
    GrB_Vector *v_output,   // v [i] is the BFS level of node i in the graph
    const GrB_Matrix A,     // input graph, treated as if boolean in semiring
    GrB_Index s             // starting node of the BFS
) ;

GrB_Info read_matrix        // read a double-precision matrix
(
    GrB_Matrix *A,          // handle of matrix to create
    FILE *f,                // file to read the tuples from
    bool make_symmetric,    // if true, return A as symmetric
    bool no_self_edges,     // if true, then remove self edges from A
    bool one_based,         // if true, input matrix is 1-based
    bool boolean,           // if true, input is GrB_BOOL, otherwise GrB_FP64
    bool printstuff         // if true, print status to stdout
) ;

GrB_Info mis                // compute a maximal independent set
(
    GrB_Vector *iset_output,    // iset(i) = true if i is in the set
    const GrB_Matrix A          // symmetric Boolean matrix
) ;

GrB_Info mis_check          // compute a maximal independent set
(
    GrB_Vector *iset_output,    // iset(i) = true if i is in the set
    const GrB_Matrix A          // symmetric Boolean matrix
) ;

void mis_score (float *result, uint32_t *degree) ;

// BFS level counter: one private copy per OpenMP thread.
extern int32_t level ;
#pragma omp threadprivate(level)

void bfs_level (int32_t *result, bool *element) ;

GrB_Info random_matrix      // create a random double-precision matrix
(
    GrB_Matrix *A_output,   // handle of matrix to create
    bool make_symmetric,    // if true, return A as symmetric
    bool no_self_edges,     // if true, then do not create self edges
    int64_t nrows,          // number of rows
    int64_t ncols,          // number of columns
    int64_t ntuples,        // number of entries (x2 if made symmetric)
    int method,             // method to use: 0:setElement, 1:build
    bool A_complex          // if true, create a Complex matrix
) ;

GrB_Info get_matrix         // get a matrix from stdin, or create random one
(
    GrB_Matrix *A_output,   // matrix to create
    int argc,               // command-line arguments
    char **argv,
    bool no_self_edges,     // if true, ensure the matrix has no self-edges
    bool boolean            // if true, file is read as GrB_BOOL, else GrB_FP64
) ;

GrB_Info wathen             // construct a random Wathen matrix
(
    GrB_Matrix *A_output,   // output matrix
    int64_t nx,             // grid dimension nx
    int64_t ny,             // grid dimension ny
    bool scale,             // if true, scale the rows
    int method,             // 0 to 3
    double *rho_given       // nx-by-ny dense matrix, if NULL use random rho
) ;

GrB_Info triu               // C = triu (A,1)
(
    GrB_Matrix *C_output,   // output matrix
    const GrB_Matrix A      // input matrix, boolean or double
) ;

GrB_Info tricount           // count # of triangles
(
    int64_t *ntri,          // # of triangles in the graph
    const int method,       // 0 to 4, see above
    const GrB_Matrix A,     // adjacency matrix for methods 0, 1, and 2
    const GrB_Matrix E,     // edge incidence matrix for method 0
    const GrB_Matrix L,     // L=tril(A) for methods 2, 4, and 4
                            // NOTE(review): "2, 4, and 4" looks like a typo
                            // (probably "2, 4, and 5") -- confirm vs tricount.c
    const GrB_Matrix U,     // U=triu(A) for methods 2, 3, and 5
    double t [2]            // t [0]: multiply time, t [1]: reduce time
) ;

//------------------------------------------------------------------------------
// page rank
//------------------------------------------------------------------------------

// dpagerank computes an array of structs for its result
typedef struct
{
    double pagerank ;   // the pagerank of a node
    GrB_Index page ;    // the node number itself
}
PageRank ;

// ipagerank computes an array of structs for its result
typedef struct
{
    uint64_t pagerank ; // the pagerank of a node
    GrB_Index page ;    // the node number itself
}
iPageRank ;

// using a standard semiring and FP64 arithmetic
GrB_Info dpagerank          // GrB_SUCCESS or error condition
(
    PageRank **Phandle,     // output: pointer to array of PageRank structs
    GrB_Matrix A
) ;

// like dpagerank but with user-defined type, operators, and semiring;
// also a stopping critirion
GrB_Info dpagerank2         // GrB_SUCCESS or error condition
(
    PageRank **Phandle,     // output: pointer to array of PageRank structs
    GrB_Matrix A,           // input graph, not modified
    int itermax,            // max number of iterations
    double tol,             // stop when norm (r-rnew,2) < tol
    int *iters,             // number of iterations taken
    GrB_Desc_Value method   // method to use for GrB_vxm (for testing only)
) ;

GrB_Info drowscale          // GrB_SUCCESS or error condition
(
    GrB_Matrix *Chandle,    // output matrix C = rowscale (A)
    GrB_Matrix A            // input matrix, not modified
) ;

GrB_Info ipagerank          // GrB_SUCCESS or error condition
(
    iPageRank **Phandle,    // output: pointer to array of iPageRank structs
    GrB_Matrix A            // input graph, not modified
) ;

GrB_Info irowscale          // GrB_SUCCESS or error condition
(
    GrB_Matrix *Chandle,    // output matrix C = rowscale (A)
    GrB_Matrix A            // input matrix, not modified
) ;

// multiplicative scaling factor for ipagerank, ZSCALE = 2^30
#define ZSCALE ((uint64_t) 1073741824)

//------------------------------------------------------------------------------
// CHECK: expr must be true; if not, return an error condition
//------------------------------------------------------------------------------

// the #include'ing file must define the FREE_ALL macro

#define CHECK(expr,info)                                                \
{                                                                       \
    if (! (expr))                                                       \
    {                                                                   \
        /* free the result and all workspace, and return NULL */        \
        FREE_ALL ;                                                      \
        printf ("Failure: line %d file %s\n", __LINE__, __FILE__) ;     \
        return (info) ;                                                 \
    }                                                                   \
}

//------------------------------------------------------------------------------
// OK: call a GraphBLAS method and check the result
//------------------------------------------------------------------------------

// OK(method) is a macro that calls a GraphBLAS method and checks the status;
// if a failure occurs, it handles the error via the CHECK macro above, and
// returns the error status to the caller.

#define OK(method)                                          \
{                                                           \
    info = method ;                                         \
    if (info != GrB_SUCCESS)                                \
    {                                                       \
        printf ("GraphBLAS error:\n%s\n", GrB_error ( )) ;  \
        CHECK (false, info) ;                               \
    }                                                       \
}

#endif
nndes.h
/* 
    Copyright (C) 2010,2011 Wei Dong <wdong.pku@gmail.com>. All Rights Reserved.

    DISTRIBUTION OF THIS PROGRAM IN EITHER BINARY OR SOURCE CODE FORM MUST BE
    PERMITTED BY THE AUTHOR.
*/

#ifndef WDONG_NNDESCENT
#define WDONG_NNDESCENT

#include "nndes-common.h"

namespace nndes {

    using std::cerr;
    using std::vector;
    using std::swap;
    using boost::progress_display;

#ifndef NNDES_SHOW_PROGRESS
#define NNDES_SHOW_PROGRESS 1
#endif

    // Normally one would use GRAPH_BOTH,
    // GRAPH_KNN & GRAPH_RNN are for experiments only.
    // NOTE(review): GRAPH_BOTH is 4, not (GRAPH_KNN|GRAPH_RNN)=3, so the
    // bit tests below treat it as its own flag rather than a union of the
    // other two -- intentional per the experimental-setup comment, but
    // worth confirming before adding new options.
    static const int GRAPH_NONE = 0, GRAPH_KNN = 1, GRAPH_RNN = 2, GRAPH_BOTH = 4;
    typedef int GraphOption;

    // The main NN-Descent class.
    // Instead of the actual dataset, the class takes a distance oracle
    // as input.  Given two data item ids, the oracle returns the distance
    // between the two.
    template <typename ORACLE>
    class NNDescent {
    private:
        const ORACLE &oracle;   // distance oracle: oracle(i, j) -> float
        int N;      // # points
        int K;      // K-NN to find
        int S;      // # of NNs to use for exploration (K * S_ from the ctor)
        GraphOption option;
        vector<KNN> nn;     // K-NN approximation, one KNN list per point

        // We maintain old and newly added KNN/RNN items
        // separately for incremental processing:
        // we need to compare two new ones
        // and a new one to an old one, but not two old ones as they
        // must have been compared already.
        vector<vector<int> > nn_old;
        vector<vector<int> > nn_new;
        vector<vector<int> > rnn_old;   // reverse-NN lists (who points at me)
        vector<vector<int> > rnn_new;

        // total number of comparisons done.
        long long int cost;

        // This function decides of it's necessary to compare two
        // points.  Obviously a point should not compare against itself.
        // Another potential usage of this function is to record all
        // pairs that have already be compared, so that when seen in the future,
        // then same pair doesn't have be compared again.
        bool mark (int p1, int p2) {
            return p1 == p2;
        }

        // Compare two points and update their KNN list of necessary.
        // Return the number of comparisons done (0 or 1).
        int update (int p1, int p2) {
            if (mark(p1, p2)) return 0;
            // KNN::update is synchronized by a lock
            // keep an order is necessary to avoid deadlock.
            if (p1 > p2) swap(p1, p2);
            float dist = oracle(p1, p2);
            nn[p1].update(KNN::Element(p2, dist, true));
            nn[p2].update(KNN::Element(p1, dist, true));
            return 1;
        }

    public:
        const vector<KNN> &getNN() const {
            return nn;
        }

        long long int getCost () const {
            return cost;
        }

        // N_: dataset size; K_: neighbors to find; S_: sample-rate multiplier
        // (exploration pool size S = K * S_); opt: which edge sets to join.
        NNDescent (int N_, int K_, float S_, const ORACLE &oracle_, GraphOption opt = GRAPH_BOTH)
            : oracle(oracle_), N(N_), K(K_), S(K * S_), option(opt), nn(N_),
              nn_old(N_), nn_new(N_), rnn_old(N_), rnn_new(N_), cost(0)
        {
            for (int i = 0; i < N; ++i) {
                nn[i].init(K);
                // random initial edges
                // NOTE(review): rand() is not thread-safe / reproducible
                // across platforms; fine here since initialization is serial.
                if ((option & GRAPH_KNN) || (option & GRAPH_BOTH)) {
                    nn_new[i].resize(S);
                    BOOST_FOREACH(int &u, nn_new[i]) {
                        u = rand() % N;
                    }
                }
                if ((option & GRAPH_RNN) || (option & GRAPH_BOTH)) {
                    rnn_new[i].resize(S);
                    BOOST_FOREACH(int &u, rnn_new[i]) {
                        u = rand() % N;
                    }
                }
            }
        }

        // An iteration contains two parts:
        //      local join
        //      identify the newly detected NNs.
        // Returns the number of newly flagged neighbors found (the caller
        // typically stops when this drops below a threshold).
        int iterate () {
#if NNDES_SHOW_PROGRESS
            progress_display progress(N, cerr);
#endif
            long long int cc = 0;
            // local joins: compare new x new and new x old pairs within each
            // point's neighborhood; cc accumulates comparison count.
#pragma omp parallel for default(shared) reduction(+:cc)
            for (int i = 0; i < N; ++i) {
                // The following loops are bloated to deal with all
                // the experimental setups.  Otherwise they should
                // be really simple.
                if (option & (GRAPH_KNN | GRAPH_BOTH)) {
                    BOOST_FOREACH(int j, nn_new[i]) {
                        BOOST_FOREACH(int k, nn_new[i]) {
                            if (j >= k) continue;   // each unordered pair once
                            cc += update(j, k);
                        }
                        BOOST_FOREACH(int k, nn_old[i]) {
                            cc += update(j, k);
                        }
                    }
                }
                if (option & (GRAPH_RNN | GRAPH_BOTH)) {
                    BOOST_FOREACH(int j, rnn_new[i]) {
                        BOOST_FOREACH(int k, rnn_new[i]) {
                            if (j >= k) continue;
                            cc += update(j, k);
                        }
                        BOOST_FOREACH(int k, rnn_old[i]) {
                            cc += update(j, k);
                        }
                    }
                }
                if (option & GRAPH_BOTH) {
                    // cross joins between the forward and reverse lists
                    BOOST_FOREACH(int j, nn_new[i]) {
                        BOOST_FOREACH(int k, rnn_old[i]) {
                            cc += update(j, k);
                        }
                        BOOST_FOREACH(int k, rnn_new[i]) {
                            cc += update(j, k);
                        }
                    }
                    BOOST_FOREACH(int j, nn_old[i]) {
                        BOOST_FOREACH(int k, rnn_new[i]) {
                            cc += update(j, k);
                        }
                    }
                }
#if NNDES_SHOW_PROGRESS
#pragma omp critical
                ++progress;
#endif
            }
            cost += cc;

            int t = 0;
//#pragma omp parallel for default(shared) reduction(+:t)
            for (int i = 0; i < N; ++i) {
                nn_old[i].clear();
                nn_new[i].clear();
                rnn_old[i].clear();
                rnn_new[i].clear();
                // find the new ones
                // nn_new temporarily stores *positions* j within nn[i];
                // they are converted to point ids after sampling below.
                for (int j = 0; j < K; ++j) {
                    KNN::Element &e = nn[i][j];
                    if (e.key == KNN::Element::BAD) continue;
                    if (e.flag) {
                        nn_new[i].push_back(j);
                    }
                    else {
                        nn_old[i].push_back(e.key);
                    }
                }
                t += nn_new[i].size();
                // sample: keep at most S of the new entries
                if (nn_new[i].size() > unsigned(S)) {
                    random_shuffle(nn_new[i].begin(), nn_new[i].end());
                    nn_new[i].resize(S);
                }
                // clear the "new" flag on the sampled entries and replace
                // each stored position with the actual point id.
                BOOST_FOREACH(int &v, nn_new[i]) {
                    nn[i][v].flag = false;
                    v = nn[i][v].key;
                }
            }
            // symmetrize: build reverse lists from the forward lists
            if ((option & GRAPH_RNN) || (option & GRAPH_BOTH)) {
                for (int i = 0; i < N; ++i) {
                    BOOST_FOREACH(int e, nn_old[i]) {
                        rnn_old[e].push_back(i);
                    }
                    BOOST_FOREACH(int e, nn_new[i]) {
                        rnn_new[e].push_back(i);
                    }
                }
            }
//#pragma omp parallel for default(shared) reduction(+:t)
            for (int i = 0; i < N; ++i) {
                // cap reverse lists at S by random sampling
                if (rnn_old[i].size() > unsigned(S)) {
                    random_shuffle(rnn_old[i].begin(), rnn_old[i].end());
                    rnn_old[i].resize(S);
                }
                if (rnn_new[i].size() > unsigned(S)) {
                    random_shuffle(rnn_new[i].begin(), rnn_new[i].end());
                    rnn_new[i].resize(S);
                }
            }
            return t;
        }
    };
}

#endif
index.h
#ifndef GBWTGRAPH_CONSTRUCTION_H
#define GBWTGRAPH_CONSTRUCTION_H

#include <cstdlib>
#include <functional>

#include <omp.h>

#include <gbwtgraph/gbwtgraph.h>
#include <gbwtgraph/minimizer.h>

/*
  index.h: Minimizer index construction from GBWTGraph.
*/

namespace gbwtgraph {

//------------------------------------------------------------------------------

/*
  Index the haplotypes in the graph. Insert the minimizers into the provided index.
  Function argument get_payload is used to generate the payload for each position
  stored in the index. The number of threads can be set through OMP.
*/
template<class KeyType>
void
index_haplotypes(const GBWTGraph& graph, MinimizerIndex<KeyType>& index,
                 const std::function<payload_type(const pos_t&)>& get_payload)
{
  typedef typename MinimizerIndex<KeyType>::minimizer_type minimizer_type;

  int threads = omp_get_max_threads();

  // Minimizer caching. We only generate the payloads after we have removed duplicate positions.
  // Each thread buffers (minimizer, position) pairs locally and flushes them
  // into the shared index under a critical section.
  std::vector<std::vector<std::pair<minimizer_type, pos_t>>> cache(threads);
  constexpr size_t MINIMIZER_CACHE_SIZE = 1024;
  auto flush_cache = [&](int thread_id)
  {
    std::vector<std::pair<minimizer_type, pos_t>>& current_cache = cache[thread_id];
    gbwt::removeDuplicates(current_cache, false);
    // Payloads are generated outside the critical section to keep it short.
    std::vector<payload_type> payload;
    payload.reserve(current_cache.size());
    for(size_t i = 0; i < current_cache.size(); i++) { payload.push_back(get_payload(current_cache[i].second)); }
    #pragma omp critical (minimizer_index)
    {
      for(size_t i = 0; i < current_cache.size(); i++)
      {
        index.insert(current_cache[i].first, current_cache[i].second, payload[i]);
      }
    }
    cache[thread_id].clear();
  };

  // Minimizer finding. Called once per haplotype window; maps each minimizer
  // occurrence back to a (node id, orientation, offset) position in the graph.
  auto find_minimizers = [&](const std::vector<handle_t>& traversal, const std::string& seq)
  {
    std::vector<minimizer_type> minimizers = index.minimizers(seq); // Calls syncmers() when appropriate.
    auto iter = traversal.begin();
    size_t node_start = 0;
    int thread_id = omp_get_thread_num();
    for(minimizer_type& minimizer : minimizers)
    {
      if(minimizer.empty()) { continue; }
      // Find the node covering minimizer starting position.
      // (traversal handles are walked forward as the offset grows)
      size_t node_length = graph.get_length(*iter);
      while(node_start + node_length <= minimizer.offset)
      {
        node_start += node_length;
        ++iter;
        node_length = graph.get_length(*iter);
      }
      pos_t pos { graph.get_id(*iter), graph.get_is_reverse(*iter), minimizer.offset - node_start };
      if(minimizer.is_reverse) { pos = reverse_base_pos(pos, node_length); }
      if(!Position::valid_offset(pos))
      {
        #pragma omp critical (cerr)
        {
          std::cerr << "index_haplotypes(): Node offset " << offset(pos) << " is too large" << std::endl;
        }
        std::exit(EXIT_FAILURE);
      }
      cache[thread_id].emplace_back(minimizer, pos);
    }
    if(cache[thread_id].size() >= MINIMIZER_CACHE_SIZE) { flush_cache(thread_id); }
  };

  /*
    Index the minimizers.
    We do a lot of redundant work by traversing both orientations and finding almost the same
    minimizers in each orientation. If we consider only the windows starting in forward
    (reverse) orientation, we may skip windows that cross from a reverse node to a forward
    node (from a forward node to a reverse node).
  */
  for_each_haplotype_window(graph, index.window_bp(), find_minimizers, (threads > 1));
  // Final flush: caches may still hold fewer than MINIMIZER_CACHE_SIZE entries.
  for(int thread_id = 0; thread_id < threads; thread_id++) { flush_cache(thread_id); }
}

//------------------------------------------------------------------------------

} // namespace gbwtgraph

#endif // GBWTGRAPH_CONSTRUCTION_H
GB_unaryop__minv_uint32_int8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint32_int8
// op(A') function:  GB_tran__minv_uint32_int8

// C type:   uint32_t
// A type:   int8_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;    \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of Ax; the loop body is the generated
// GB_CAST_OP macro above (cast int8_t -> uint32_t, then unsigned minv).
GrB_Info GB_unop__minv_uint32_int8
(
    uint32_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is the shared transpose template GB_unaryop_transpose.c,
// specialized via the macros defined at the top of this file.
GrB_Info GB_tran__minv_uint32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
softmax-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * Copyright (c) 2017 by Contributors
 * \file softmax-inl.h
 * \brief CPU and GPU implementations of (log-)softmax forward/backward,
 *        with optional temperature scaling and optional per-row valid
 *        lengths (entries beyond the length are masked to zero).
 */
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_

#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>

#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda_utils.h"

namespace mxnet {
namespace op {
namespace mxnet_op {

// softmax forward element op: out = exp(a) / b, where a is the shifted
// input and b is the row sum of exponentials.
struct softmax_fwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float a, AType b) {
    return AType(expf(a)/b);
  }
  template<typename AType>
  MSHADOW_XINLINE static AType Map(double a, AType b) {
    return AType(exp(a)/b);
  }
};

// log-softmax forward element op: out = a - log(b).
struct log_softmax_fwd {
  template<typename DType>
  MSHADOW_XINLINE static float Map(DType a, float b) {
    return a - logf(b);
  }
  template<typename DType>
  MSHADOW_XINLINE static double Map(DType a, double b) {
    return a - log(b);
  }
};

// CPU softmax along `axis`.  M = axis length, N = number of rows; each row
// is processed independently (OpenMP over rows).  The max is subtracted
// before exponentiation for numerical stability.  If `length` is non-null,
// only the first length[i] entries of row i participate and the rest of the
// outputs are set to 0.
template<typename OP, bool negate, typename AType, typename DType,
         typename OType, typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const DType temperature) {
  index_t M = shape[axis];
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];  // stride between consecutive elements on `axis`

  if (length == nullptr) {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);

      // row maximum (negated input when `negate` is set)
      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < M; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }

      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  } else {
    // Variant with per-row valid length: positions >= len get output 0.
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t len = static_cast<index_t>(length[i]);
      index_t base = unravel_dot(i, sshape, stride);
      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < len; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }
      for (index_t j = len; j < M; ++j) {
        out[base + j*sa] = OType(0.0f);
      }

      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  }
}

// softmax backward element op: igrad = out * (ograd - sum).
struct softmax_bwd {
  template<typename DType, typename AType>
  MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
    return AType(out * (ograd - sum));
  }
};

// log-softmax backward element op: igrad = ograd - exp(out) * sum.
struct log_softmax_bwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
    return AType(ograd - expf(out)*sum);
  }
  template<typename AType>
  MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
    return AType(ograd - exp(out)*sum);
  }
};

// CPU softmax gradient along `axis`.  OP1 computes the per-element term
// folded into the row reduction `sum`; OP2 combines (ograd, out, sum) into
// the input gradient.  If `length` is non-null, positions past length[i]
// get gradient 0.
template<typename OP1, typename OP2, int Req, bool negate, typename AType,
         typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape,
                        int axis, const DType temperature) {
  index_t M = shape[axis];
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];

  if (length != nullptr) {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      index_t len = static_cast<index_t>(length[i]);

      AType sum = AType(0);
      for (index_t j = 0; j < len; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }

      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  } else {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);

      AType sum = AType(0);
      for (index_t j = 0; j < M; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }

      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  }
}

#ifdef __CUDACC__
// Generic GPU softmax kernel: one thread block per row; block-wide
// reductions (via shared memory) compute the row max and exp-sum.
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
                                       index_t M, int axis, Shape<ndim> sshape,
                                       Shape<ndim> stride, const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);

  // block reduction: row maximum
  red::maximum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::maximum, x_bits>(smem);
  __syncthreads();
  DType smax = smem[0];
  __syncthreads();

  // block reduction: sum of exp((x - max)/T)
  red::sum::SetInitValue(smem[x]);
  DType val;
  for (index_t i = x; i < len; i += x_size) {
    val = negate ? -in[base + i*sa]:in[base + i*sa];
    smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();

  // write outputs; positions past the valid length are zeroed
  for (index_t i = x; i < M; i += x_size) {
    val = negate ? -in[base + i*sa] : in[base + i*sa];
    out[base + i*sa] =
      (i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
  }
}

const int softmax_threads_per_block = 512;

// Optimized GPU softmax kernel for stride-1 (innermost-axis) rows that fit
// in shared memory: rows are staged through `persistent_storage` with wide
// vector loads of type LType, and several rows share one thread block.
template<typename OP, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
                                               const index_t M, const double temperature,
                                               const int rows_per_block, const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;

  const LType* in_aligned = reinterpret_cast<const LType*>(in);
  size_t base = my_row * row_length;

  // stage the row into shared memory with wide loads
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
  }
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
  __syncthreads();

  // partial-row maximum per thread, then tree + warp reduction
  DType my_max_value;
  red::maximum::SetInitValue(my_max_value);

  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
  }
  scratch[threadIdx.x] = my_max_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return ::max(x, y); });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // partial-row exp-sum per thread, same reduction shape as above
  AType my_sum;
  red::sum::SetInitValue(my_sum);

  for (index_t i = my_id; i < len; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  scratch[threadIdx.x] = my_sum;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] += scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return x + y;});
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // compute softmax in place in shared memory, then store with wide writes
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : DType(0.0f);
  }
  __syncthreads();

  LType* out_aligned = reinterpret_cast<LType*>(out);

  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
  }
}

// GPU softmax launcher: picks the optimized stride-1 kernel when the row is
// contiguous, fits in the 20 kB shared-memory staging area, and input/output
// types match; otherwise falls back to the generic kernel.
template<typename OP, bool negate, typename AType, typename DType,
         typename OType, typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;
  index_t M = shape[axis];
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M * sizeof(DType) / sizeof(LType),
                                                                   softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_compute_kernel<OP, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
  } else {
    softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        in, out, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
  }
}

template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const
OType *out, const OType *ograd, DType *igrad, const IType *length, const index_t M, const double temperature, const int rows_per_block, const index_t total_rows) { __shared__ AType scratch[softmax_threads_per_block]; __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)]; const int warp_size = 32; const int threads_per_row = softmax_threads_per_block / rows_per_block; const int my_local_row = threadIdx.x / threads_per_row; const int my_row = blockIdx.x * rows_per_block + my_local_row; if (my_row >= total_rows) return; const int my_id = threadIdx.x % threads_per_row; const int entries_per_load = sizeof(LType)/sizeof(DType); const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]); // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating // kernels where sizeof(LType) may be less than sizeof(DType), // resulting in entries_per_load being 0. // This is not a valid combination and is being checked against // in the launcher code. This switch here is just to silence // the division by zero warning generated for such invalid cases. const int row_length = entries_per_load > 0 ? 
M / entries_per_load : 0; const LType* out_aligned = reinterpret_cast<const LType*>(out); const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd); size_t base = my_row * row_length; for (index_t i = my_id; i < row_length; i += threads_per_row) { persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i]; persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i]; } DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2); __syncthreads(); AType my_sum_value; red::sum::SetInitValue(my_sum_value); for (index_t i = my_id; i < len; i += threads_per_row) { my_sum_value += OP1::Map(row[i + M], row[i]); } scratch[threadIdx.x] = my_sum_value; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size]; } __syncthreads(); } if (my_id < warp_size) { AType my_value = warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y; }); scratch[threadIdx.x] = my_value; } __syncthreads(); AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); for (index_t i = my_id; i < M; i += threads_per_row) { const DType val = negate ? -OP2::Map(row[i + M], row[i], ssum) : OP2::Map(row[i + M], row[i], ssum); row[i] = (i < len) ? 
DType(val / static_cast<DType>(temperature)) : DType(0.0f); if (Req == kAddTo) { row[i] += igrad[my_row * M + i]; } } __syncthreads(); LType* igrad_aligned = reinterpret_cast<LType*>(igrad); for (index_t i = my_id; i < row_length; i += threads_per_row) { igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i]; } } template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType, typename IType> __global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad, const IType *length, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, const double temperature) { const unsigned x_size = 1 << x_bits; __shared__ AType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t x = threadIdx.x; index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M; red::sum::SetInitValue(smem[x]); for (index_t i = x; i < len; i += x_size) { smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); DType final_result; for (index_t i = x; i < M; i += x_size) { final_result = negate ? -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) : OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum); final_result = (i < len) ? 
final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature)); } } template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType, typename IType> inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd, DType *igrad, IType *length, Shape<ndim> shape, int axis, const double temperature) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; const size_t DSize = sizeof(DType); // Using 20 kB of shared memory for persistent storage in the optimized case // Need to store both out and ograd, so M can be only half compared to // forward pass. const size_t max_opt_M = 20 * 1024 / DSize / 2; if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M && std::is_same<DType, OType>::value) { int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType)); MXNET_LOAD_TYPE_SWITCH(ltype, LType, { int rows_per_block = mxnet::common::cuda::get_rows_per_block(M * sizeof(DType) / sizeof(LType), softmax_threads_per_block); int nblocks = (N + rows_per_block - 1) / rows_per_block; CHECK_LE(sizeof(DType), sizeof(LType)); softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType> <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, length, M, temperature, rows_per_block, N); }); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel); } else { softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim> <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, length, M, axis, sshape, stride, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel); } } #endif } // namespace mxnet_op struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> { int axis; dmlc::optional<double> temperature; dmlc::optional<int> dtype; dmlc::optional<bool> use_length; 
DMLC_DECLARE_PARAMETER(SoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
    .describe("The axis along which to compute softmax.");
    DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
    .describe("Temperature parameter in softmax");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(dmlc::optional<int>())
    .describe("DType of the output in case this can't be inferred. "
              "Defaults to the same as input's dtype if not defined (dtype=None).");
    DMLC_DECLARE_FIELD(use_length)
    .set_default(dmlc::optional<bool>(false))
    .describe("Whether to use the length input as a mask over the data input.");
  }
};

// True when the user explicitly requested an output dtype override.
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  return param.dtype.has_value() && param.dtype.value() != -1;
}

// True when a length mask input accompanies the data input.
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  return param.use_length.value();
}

// Forward type inference: honor a dtype override, otherwise propagate
// elementwise between data input and output.
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int>* in_attrs,
                                 std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);

  if (softmax_has_dtype_override(attrs)) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
    type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
    return true;
  } else {
    std::vector<int> tmp = {in_attrs->at(0)};
    return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
  }
}

// Forward shape inference: when a length mask is used, its shape is the data
// shape with the softmax axis removed.
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
  if (param.use_length.value()) {
    mxnet::TShape& dshape = in_attrs->at(0);
    mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
    int j = 0;
    // axis == -1 means the last dimension of the data.
    int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
    for (int i = 0; i < dshape.ndim(); ++i) {
      if (i != axis) {
        tmp_shape[j++] = dshape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
  }
  mxnet::ShapeVector tmp = {in_attrs->at(0)};
  return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}

// Backward shape inference. Input layout differs by mode:
//   plain:                 {ograd, output} -> {dgrad}
//   dtype override:        {ograd, data, output} -> {dgrad}
//   use_length:            {ograd, data, length, output} -> {dgrad, lgrad}
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                      mxnet::ShapeVector *in_attrs,
                                      mxnet::ShapeVector *out_attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
      mxnet::ShapeVector dgrad = {out_attrs->at(0)};
      bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
      SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
      mxnet::ShapeVector length = {in_attrs->at(2)};
      mxnet::ShapeVector lgrad = {out_attrs->at(1)};
      res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
      SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
      return res;
    } else {
      return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
    }
  } else {
    return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
  }
}

// Backward type inference for the same three input layouts.
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
    int in_dtype = (*in_attrs)[1];
    int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
    if (softmax_use_length(attrs)) {
      TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
    }
    // NOTE(review): when dtype is overridden but use_length=false,
    // out_attrs was checked to have size 1 above, yet (*out_attrs)[1] is
    // read here — looks out of range; confirm upstream guarantees.
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
           (*out_attrs)[1] != -1 && (*in_attrs)[1] != -1;
  } else {
    CHECK_EQ(in_attrs->size(), 2U);
    int out_dtype = (*in_attrs)[1];
    TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
  }
}

// In-place sharing options between gradient inputs and outputs, per mode.
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
    } else {
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
    }
  } else {
    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
  }
}

// Number of gradient inputs for the chosen mode (see SoftmaxGradOpShape).
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    return softmax_use_length(attrs) ? 4 : 3;
  }
  return 2;
}

// Names matching SoftmaxGradOpNumInputs for each mode.
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      return std::vector<std::string>{"ograd", "data", "length", "output"};
    } else {
      return std::vector<std::string>{"ograd", "data", "output"};
    }
  } else {
    return std::vector<std::string>{"ograd", "output"};
  }
}

// FGradient functor: the gradient needs both input and output when dtype is
// overridden or a length mask is present; otherwise the output suffices.
struct SoftmaxFGradient {
  const char *op_name;
  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
                                          const std::vector<nnvm::NodeEntry>& ograds) const {
    if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
      return ElemwiseGradUseInOut {op_name}(n, ograds);
    } else {
      return ElemwiseGradUseOut {op_name}(n, ograds);
    }
  }
};

/* Forward compute entry point: unpacks the op attributes, selects
 * accumulation type (safe accumulation opt-in via env var), resolves the
 * optional length mask, and dispatches to the 2-D or 3-D Softmax kernel. */
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp) return;
  CHECK_NE(req[0], kAddTo);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
    param.temperature.value() : 1.0;
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);

  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
                    "See https://mxnet.apache.org/api/faq/env_var "
                    "for more details.");
  }

  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
      int type = kInt32;
      if (param.use_length.value()) {
        CHECK(inputs.size() > 1)
          << "Mask needs to be provided when using softmax with use_length=True.";
        type = inputs[1].type_flag_;
      }
      MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
        IType* mask_ptr = nullptr;
        if (param.use_length.value()) {
          mask_ptr = inputs[1].dptr<IType>();
        }
        if (safe_acc) {
          // Accumulate in AType (e.g. float for fp16 data).
          if (shape.ndim() == 2) {
            Softmax<OP, negate, AType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
              axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, AType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
              axis, static_cast<DType>(temperature));
          }
        } else {
          // Accumulate in the data type itself.
          if (shape.ndim() == 2) {
            Softmax<OP, negate, DType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
              axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, DType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
              axis, static_cast<DType>(temperature));
          }
        }
      });
    });
  });
}

/* Backward compute entry point: zeroes the length-gradient output when a
 * mask is used (no gradient flows to lengths), then dispatches SoftmaxGrad
 * with the softmax output located via out_idx for the active input layout. */
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (softmax_use_length(attrs)) {
    MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
      if (req[1] != kNullOp) {
        mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
          ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
      }
    });
  }
  if (req[0] == kNullOp) return;
  const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
    param.temperature.value() : 1.0;
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  // Index of the forward output within `inputs` (see SoftmaxGradOpInputNames).
  int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
  out_idx = softmax_use_length(attrs) ? 3 : out_idx;
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);

  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
          IType * length_ptr = nullptr;
          if (softmax_use_length(attrs)) {
            length_ptr = inputs[2].dptr<IType>();
          }
          if (safe_acc) {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<2>(), axis,
                static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<3>(), axis,
                static_cast<DType>(temperature));
            }
          } else {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<2>(), axis,
                static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<3>(), axis,
                static_cast<DType>(temperature));
            }
          }
        });
      });
    });
  });
}

}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
sections.c
#include <stdio.h>
#include <unistd.h>
#include <omp.h>

/*
 * OpenMP `sections` demo.
 *
 * A team of four threads enters a parallel region; each thread prints a
 * greeting, and the four sections are then distributed among the team —
 * every section executes exactly once, on whichever thread claims it, so
 * the four one-second sleeps overlap when run with OpenMP enabled.
 *
 * Fix vs. previous revision: removed the unused `#define N 20` macro.
 *
 * Returns 0 on success.
 */
int main() {
  int tid;  /* per-thread id; private inside the parallel region */

  omp_set_num_threads(4);  /* request a team of four threads */

#pragma omp parallel private(tid)
  {
    tid = omp_get_thread_num();
    printf("Hello %d\n", tid);

    /* Work-shared sections: the printed tid identifies which thread
     * executed each section. An implicit barrier ends the construct. */
#pragma omp sections
    {
#pragma omp section
      {
        printf("Seção 1 - thread %d\n", tid);
        sleep(1);
      }
#pragma omp section
      {
        printf("Seção 2 - thread %d\n", tid);
        sleep(1);
      }
#pragma omp section
      {
        printf("Seção 3 - thread %d\n", tid);
        sleep(1);
      }
#pragma omp section
      {
        printf("Seção 4 - thread %d\n", tid);
        sleep(1);
      }
    }
  }
  return 0;
}
7942.c
// Tiled Jacobi-style 3-D heat stencil, derived via the CHILL source-to-source
// compiler from the PolyBench 'heat-3d' kernel (frontend: rose).
// Runs 1000 time steps; each step applies the axis-wise second-difference
// update first from A into B and then from B back into A. The two outer
// spatial dimensions are tiled 32x16, and each phase is parallelized across
// tiles with OpenMP. `tsteps` is accepted for signature compatibility with
// the PolyBench driver, but the generated code hard-codes the step count.
void kernel_heat_3d(int tsteps, int n, double A[200 + 0][200 + 0][200 + 0], double B[200 + 0][200 + 0][200 + 0]) {
  int t;   // time step
  int ii;  // tile origin along the first spatial dimension (tile width 32)
  int i;   // point index inside the ii-tile
  int jj;  // tile origin along the second spatial dimension (tile width 16)
  int j;   // point index inside the jj-tile
  int k;   // innermost (contiguous) spatial index, untiled
  for (t = 1; t <= 1000; t += 1) {
    // Phase 1: read A, write B over the interior points 1..n-2.
#pragma omp parallel for private(ii,i,jj,j,k)
    for (ii = 1; ii <= n - 2; ii += 32)
      for (i = ii; i <= (ii + 31 < n - 2 ? ii + 31 : n - 2); i += 1)
        for (jj = 1; jj <= n - 2; jj += 16)
          for (j = jj; j <= (jj + 15 < n - 2 ? jj + 15 : n - 2); j += 1)
            for (k = 1; k <= n - 2; k += 1)
              B[i][j][k] = 0.125 * (A[i + 1][j][k] - 2 * A[i][j][k] + A[i - 1][j][k])
                           + 0.125 * (A[i][j + 1][k] - 2 * A[i][j][k] + A[i][j - 1][k])
                           + 0.125 * (A[i][j][k + 1] - 2 * A[i][j][k] + A[i][j][k - 1])
                           + A[i][j][k];
    // Phase 2: identical update with the roles of A and B swapped.
#pragma omp parallel for private(ii,i,jj,j,k)
    for (ii = 1; ii <= n - 2; ii += 32)
      for (i = ii; i <= (ii + 31 < n - 2 ? ii + 31 : n - 2); i += 1)
        for (jj = 1; jj <= n - 2; jj += 16)
          for (j = jj; j <= (jj + 15 < n - 2 ? jj + 15 : n - 2); j += 1)
            for (k = 1; k <= n - 2; k += 1)
              A[i][j][k] = 0.125 * (B[i + 1][j][k] - 2 * B[i][j][k] + B[i - 1][j][k])
                           + 0.125 * (B[i][j + 1][k] - 2 * B[i][j][k] + B[i][j - 1][k])
                           + 0.125 * (B[i][j][k + 1] - 2 * B[i][j][k] + B[i][j][k - 1])
                           + B[i][j][k];
  }
}
homomorphic_comparison_modified.c
/* Plaintext keyword: one 1024-coefficient polynomial of bits per position. */
struct plaintext_keyword {
  int bits[TABLE_CONTENT_SIZE][1024];
};

/* Zero every coefficient; operates on a by-value copy, so callers must use
 * the returned struct. */
struct plaintext_keyword init_keyword(struct plaintext_keyword ptext) {
  int i, j;
  for(j=0; j<TABLE_CONTENT_SIZE; j++)
    for(i=0; i<1024; i++)
      ptext.bits[j][i]=0;
  return(ptext);
}

/* Plaintext data block; same layout as plaintext_keyword. */
struct plaintext {
  int bits[TABLE_CONTENT_SIZE][1024];
};

/* Zero every coefficient; same by-value/return contract as init_keyword. */
struct plaintext init_plaintext(struct plaintext ptext) {
  int i, j;
  for(j=0; j<TABLE_CONTENT_SIZE; j++)
    for(i=0; i<1024; i++)
      ptext.bits[j][i]=0;
  return(ptext);
}

/* One FV ciphertext encrypting a single bit: two 5x1024 polynomial arrays. */
struct encrypted_bit {
  long long int c0[5][1024], c1[5][1024];
};

struct encrypted_keyword {
  struct encrypted_bit bits[TABLE_CONTENT_SIZE];
};

struct encrypted_data {
  struct encrypted_bit bits[TABLE_CONTENT_SIZE];
};

/* A 5-bit window of ciphertext bits (name notwithstanding). */
struct encrypted_data_8bit {
  struct encrypted_bit bits[5];
};

/* One precomputed entry per window value; bits[index] selects the window. */
struct window_table_entry {
  struct encrypted_bit bits[2];
};

/* 32 entries = one per possible 5-bit window value. */
struct window_table {
  struct window_table_entry window_table_entries[32];
};

/* Deep-copy one encrypted bit (both c0 and c1 components). */
void copy_encrypted_bit(struct encrypted_bit *bit_in, struct encrypted_bit *bit_out) {
  int i, j;
  for(i=0; i<5; i++)  // copy cout in bit_in
  {
    for(j=0; j<1024; j++)
    {
      bit_out->c0[i][j] = bit_in->c0[i][j];
      bit_out->c1[i][j] = bit_in->c1[i][j];
    }
  }
}

/* Deep-copy all bits of an encrypted keyword. */
void copy_encrypted_keyword(struct encrypted_keyword *in, struct encrypted_keyword *out) {
  int k;
  for(k=0; k<TABLE_CONTENT_SIZE; k++)
    copy_encrypted_bit(&in->bits[k], &out->bits[k]);
}

/* Deep-copy all bits of an encrypted data block. */
void copy_encrypted_data(struct encrypted_data *in, struct encrypted_data *out) {
  int k;
  for(k=0; k<TABLE_CONTENT_SIZE; k++)
    copy_encrypted_bit(&in->bits[k], &out->bits[k]);
}

struct encrypted_data ed_const_one, ed_const_allone;

/* Encrypt each plaintext bit-polynomial with FV_enc_q into `ed`. */
struct encrypted_keyword encrypt_keyword(struct plaintext_keyword ptext, struct encrypted_keyword ed) {
  int i;
  for(i=0; i<TABLE_CONTENT_SIZE; i++)
    FV_enc_q(ptext.bits[i], ed.bits[i].c0, ed.bits[i].c1);
  return(ed);
}

/* Encrypt a plaintext data block, bit by bit. */
struct encrypted_data encrypt_data(struct plaintext ptext, struct encrypted_data ed) {
  int i;
  for(i=0; i<TABLE_CONTENT_SIZE; i++)
    FV_enc_q(ptext.bits[i], ed.bits[i].c0, ed.bits[i].c1);
  return(ed);
}

/* Decrypt an encrypted keyword back into plaintext bit-polynomials.
 * NOTE(review): loops to a literal 10 rather than TABLE_CONTENT_SIZE —
 * confirm whether the remaining positions are intentionally skipped. */
struct plaintext_keyword decrypt_keyword(struct plaintext_keyword ptext, struct encrypted_keyword ed) {
  int i;
  for(i=0; i<10; i++)
    FV_dec_q(ptext.bits[i], ed.bits[i].c0, ed.bits[i].c1);
  return(ptext);
}

/* Decrypt a data block and pack the recovered bits MSB-first into bytes of
 * decoded_string (one byte per 8 encrypted bits). */
void decrypt_data(struct encrypted_data ed, unsigned char decoded_string[]) {
  int i;
  struct plaintext ptext;
  // NOTE(review): return value discarded — init_plaintext works on a copy,
  // so ptext itself stays uninitialized here; only bits[0] is subsequently
  // written by FV_dec_q before use. Confirm this is intended.
  init_plaintext(ptext);
  unsigned char decoded_char = 0;
  for(i=TABLE_CONTENT_SIZE-1; i>=0; i--)
  {
    FV_dec_q(ptext.bits[0], ed.bits[i].c0, ed.bits[i].c1);
    decoded_char = 2*decoded_char + ptext.bits[0][0];
    if(i%8==0)
    {
      decoded_string[i/8] = decoded_char;
      decoded_char = 0;
    }
  }
}

struct encrypted_bit encryption_of_bit_zero, encryption_of_bit_one;
struct window_table wt;

/* For window `index` of the encrypted keyword (bits 5*index..5*index+4),
 * precompute wt.window_table_entries[i].bits[index] for every candidate
 * window value i in [0,32): homomorphically add the trivial encryption of i
 * to the keyword window, recrypt, and AND the five result bits together via
 * repeated FV_mul. The product encrypts 1 iff the window equals the keyword
 * bits (assuming XOR/AND plaintext semantics of FV_add/FV_mul — confirm). */
void homomorphic_search_precomputation(struct encrypted_keyword ed, int index) {
  int i, j, bit;
  struct encrypted_data_8bit window;  // 8-bit windows are formed for all possible combinations of the table_entry bits
  struct encrypted_data_8bit result;  // is the addition of (window + ed_window)
  struct encrypted_bit multiplication_result;  // is the multiplication of the bits of result
  struct encrypted_data_8bit window_ed;
  int thread_num;

  // copy of the bits of ed in window_ed.
  for(i=0; i<5; i++)
    copy_encrypted_bit(&ed.bits[5*index+i], &window_ed.bits[i]);

  //#pragma omp parallel for private(j, bit, window, result, multiplication_result)
  for(i=0; i<32; i++)
  {
    // Build the trivial encryption of the 5-bit value i.
    for(j=0; j<5; j++)
    {
      bit = (i>>j)%2;
      if(bit==0)
        copy_encrypted_bit(&encryption_of_bit_zero, &window.bits[j]);
      else
        copy_encrypted_bit(&encryption_of_bit_one, &window.bits[j]);
    }
    // result = window + keyword window, recrypted bit by bit.
    for(j=0; j<5; j++)
    {
      FV_add(window_ed.bits[j].c0, window_ed.bits[j].c1, window.bits[j].c0, window.bits[j].c1, result.bits[j].c0, result.bits[j].c1);
      if(IMPLEMENTATION_TYPE==0)
        FV_recrypt1_HW(result.bits[j].c0, result.bits[j].c1);
      else
        FV_recrypt1(result.bits[j].c0, result.bits[j].c1);
    }
    // Chain-multiply the five bits; intermediate products feed the next step.
    for(j=1; j<5; j++)
    {
      FV_mul(result.bits[j-1].c0, result.bits[j-1].c1, result.bits[j].c0, result.bits[j].c1, multiplication_result.c0, multiplication_result.c1);
      if(IMPLEMENTATION_TYPE==0)
        FV_recrypt1_HW(multiplication_result.c0, multiplication_result.c1);
      else
        FV_recrypt1(multiplication_result.c0, multiplication_result.c1);
      if(j<4)
        copy_encrypted_bit(&multiplication_result, &result.bits[j]);
    }
    copy_encrypted_bit(&multiplication_result, &wt.window_table_entries[i].bits[index]);
  }
}

/* Server-side encrypted search: precompute the two window tables for the
 * keyword, then scan all table rows, homomorphically selecting the row whose
 * index matches the keyword and accumulating its encrypted content. */
struct encrypted_data homomorphic_search(struct encrypted_keyword keyword) {
  int index;
  int thread_num;
  int i, j, k, window_index0, window_index1, window_index2, window_index3;
  unsigned int table_content;
  unsigned char table_row[GENOMIC_STRING_LENGTH];
  struct encrypted_bit multiplication_result0, multiplication_result1;  // is the multiplication of the bits of result
  struct encrypted_data acc[THREADS];  // this accumulates the sum of encrypted contents
  struct encrypted_data acc_sum;

  printf("\n[SERVER] Performing encrypted search ...\n");

  for(index=0; index<2; index++)
    homomorphic_search_precomputation(keyword, index);
  //printf("Precomp done\n");

  // Initialize per-thread accumulators and the final sum to encryptions of 0.
  // NOTE(review): acc[i]/acc_sum are passed in uninitialized and returned
  // fully written by encrypt_data — confirm FV_enc_q overwrites c0/c1.
  struct plaintext ptext;
  for(i=0; i<THREADS; i++)
  {
    //printf("i=%d\n", i);
    init_plaintext(ptext);
    acc[i] = encrypt_data(ptext, acc[i]);
  }
  ptext=init_plaintext(ptext);
  acc_sum = encrypt_data(ptext, acc_sum);

  // NOTE(review): every team thread writes `i` here (benign only because all
  // write the same value); consider a single-threaded query instead.
#pragma omp parallel
  i = omp_get_num_threads();
  //printf("num of threads %d\n", i);

  // searching starts with the table entries
  // NOTE(review): stale comment below — the loop bound is 256*4 = 1024,
  // not 16384; confirm which bound is intended.
#pragma omp parallel for private(window_index0, window_index1, window_index2, window_index3, multiplication_result0, multiplication_result1, table_content, j, k, thread_num, table_row)
  for(i=0; i<256*4; i++)  // assuming 40 threads, this boundary 16384 is a multiple of 4
  {
    thread_num = omp_get_thread_num();
    // Split the row index into two 5-bit window lookups.
    window_index0 = i & 31;
    window_index1 = (i>>5) & 31;
    // multiplication_result0 = window_index0 & window_index1
    FV_mul(wt.window_table_entries[window_index1].bits[1].c0, wt.window_table_entries[window_index1].bits[1].c1, wt.window_table_entries[window_index0].bits[0].c0, wt.window_table_entries[window_index0].bits[0].c1, multiplication_result0.c0, multiplication_result0.c1);
    table(i, table_row);
    // Add the selector into the accumulator wherever the row has a 1 bit.
    for(j=0; j<GENOMIC_STRING_LENGTH; j++)
    {
      for(k=0; k<8; k++)
      {
        if( (table_row[j]>>k)%2 == 1 )
          FV_add( acc[thread_num].bits[j*8+k].c0, acc[thread_num].bits[j*8+k].c1, multiplication_result0.c0, multiplication_result0.c1, acc[thread_num].bits[j*8+k].c0, acc[thread_num].bits[j*8+k].c1);
      }
    }
  }

  // Fold the per-thread accumulators into the final encrypted result.
  for(i=0; i<THREADS; i++)
  {
    for(j=0; j<TABLE_CONTENT_SIZE; j++)
    {
      FV_add(acc[i].bits[j].c0, acc[i].bits[j].c1, acc_sum.bits[j].c0, acc_sum.bits[j].c1, acc_sum.bits[j].c0, acc_sum.bits[j].c1);
    }
  }

  printf("\n[SERVER] Encrypted Search done\n");
  return(acc_sum);
}
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* Ceiling/floor of integer division via double arithmetic (CLooG helpers). */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
   storing the result in RESULT.
   Return 1 if the difference is negative, otherwise 0.
   NOTE: Y is modified in place while normalizing the microsecond carry. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
    {
      int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * nsec;
      y->tv_sec += nsec;
    }
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int nsec = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * nsec;
      y->tv_sec -= nsec;
    }
  /* Compute the time remaining to wait.  tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: argv = {Nx, Ny, Nz, Nt}.  Allocates the two time-step
   grids and the 13 coefficient grids, runs TESTS timed repetitions of the
   diamond-tiled (PLUTO/CLooG-generated) stencil sweep, and reports the best
   wall-clock time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* Problem sizes plus 8 ghost cells per dimension.
     NOTE(review): Nx/Ny/Nz (and Nt) are used uninitialized when fewer than
     4 (resp. 5) arguments are supplied -- confirm intended CLI contract. */
  if (argc > 3)
    {
      Nx = atoi(argv[1])+8;
      Ny = atoi(argv[2])+8;
      Nz = atoi(argv[3])+8;
    }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A[2][Nz][Ny][Nx] double-buffered grid
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* coef[13][Nz][Ny][Nx]: one center coefficient plus 4 symmetric pairs
     per axis (radius-4, 25-point axis-symmetric stencil). */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 16;
  tile_size[3] = 256;
  tile_size[4] = -1;  /* sentinel; NOTE(review): tile_size is never freed */

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables (A[1] is produced by the first sweep)
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (A stray block of glibc <features.h>-prelude boilerplate, pasted here by
       the source-to-source tool, carried no information about this benchmark
       and has been condensed to this note.) */

    /* CLooG-generated diamond-tiled loop nest.  t1..t5 iterate the tile
       space (time skewing by t5), t6/t7/t8 are the intra-tile z/y/x points;
       do not hand-edit the bounds -- they encode the legality of the tiling. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=2*Nt-2;t1++) {
    lbp=ceild(t1+2,2);
    ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1-4,8),ceild(4*t2-Nz-3,16));t3<=min(min(floord(4*Nt+Ny-9,16),floord(2*t1+Ny-3,16)),floord(4*t2+Ny-9,16));t3++) {
        for (t4=max(max(ceild(t1-124,128),ceild(4*t2-Nz-243,256)),ceild(16*t3-Ny-243,256));t4<=min(min(min(floord(4*Nt+Nx-9,256),floord(2*t1+Nx-3,256)),floord(4*t2+Nx-9,256)),floord(16*t3+Nx+3,256));t4++) {
          for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(256*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
            for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                lbv=max(256*t4,4*t5+4);
                ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 25-point update: center term plus radius 1..4 symmetric
                     pairs along z (coef 1,4,7,10), y (2,5,8,11), x (3,6,9,12),
                     written into the other time buffer ((t5+1)%2). */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  /* NOTE(review): the top-level pointers A and coef themselves (and
     tile_size) are never freed -- harmless at exit, but worth confirming. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
/* ==== semantics.c ==== */
/* Perform the semantic phase of parsing, i.e., the process of building tree structure, checking semantic consistency, and building RTL. These routines are used both during actual parsing and during the instantiation of template functions. Copyright (C) 1998-2018 Free Software Foundation, Inc. Written by Mark Mitchell (mmitchell@usa.net) based on code found formerly in parse.y and pt.c. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "target.h" #include "bitmap.h" #include "cp-tree.h" #include "stringpool.h" #include "cgraph.h" #include "stmt.h" #include "varasm.h" #include "stor-layout.h" #include "c-family/c-objc.h" #include "tree-inline.h" #include "intl.h" #include "tree-iterator.h" #include "omp-general.h" #include "convert.h" #include "stringpool.h" #include "attribs.h" #include "gomp-constants.h" #include "predict.h" /* There routines provide a modular interface to perform many parsing operations. They may therefore be used during actual parsing, or during template instantiation, which may be regarded as a degenerate form of parsing. */ static tree maybe_convert_cond (tree); static tree finalize_nrv_r (tree *, int *, void *); static tree capture_decltype (tree); /* Used for OpenMP non-static data member privatization. 
*/ static hash_map<tree, tree> *omp_private_member_map; static vec<tree> omp_private_member_vec; static bool omp_private_member_ignore_next; /* Deferred Access Checking Overview --------------------------------- Most C++ expressions and declarations require access checking to be performed during parsing. However, in several cases, this has to be treated differently. For member declarations, access checking has to be deferred until more information about the declaration is known. For example: class A { typedef int X; public: X f(); }; A::X A::f(); A::X g(); When we are parsing the function return type `A::X', we don't really know if this is allowed until we parse the function name. Furthermore, some contexts require that access checking is never performed at all. These include class heads, and template instantiations. Typical use of access checking functions is described here: 1. When we enter a context that requires certain access checking mode, the function `push_deferring_access_checks' is called with DEFERRING argument specifying the desired mode. Access checking may be performed immediately (dk_no_deferred), deferred (dk_deferred), or not performed (dk_no_check). 2. When a declaration such as a type, or a variable, is encountered, the function `perform_or_defer_access_check' is called. It maintains a vector of all deferred checks. 3. The global `current_class_type' or `current_function_decl' is then setup by the parser. `enforce_access' relies on these information to check access. 4. Upon exiting the context mentioned in step 1, `perform_deferred_access_checks' is called to check all declaration stored in the vector. `pop_deferring_access_checks' is then called to restore the previous access checking mode. In case of parsing error, we simply call `pop_deferring_access_checks' without `perform_deferred_access_checks'. */ struct GTY(()) deferred_access { /* A vector representing name-lookups for which we have deferred checking access controls. 
We cannot check the accessibility of names used in a
     decl-specifier-seq until we know what is being declared because
     code like:

       class A {
	 class B {};
	 B* f();
       }

       A::B* A::f() { return 0; }

     is valid, even though `A::B' is not generally accessible.  */
  vec<deferred_access_check, va_gc> * GTY(()) deferred_access_checks;

  /* The current mode of access checks.  */
  enum deferring_kind deferring_access_checks_kind;
};

/* Data for deferred access checking.  */
/* The stack of deferring contexts; one entry per nested context pushed
   by push_deferring_access_checks.  GC-rooted (GTY).  */
static GTY(()) vec<deferred_access, va_gc> *deferred_access_stack;
/* Nesting depth of contexts in which access checking is disabled
   entirely (dk_no_check); while nonzero, no entries are pushed.  */
static GTY(()) unsigned deferred_access_no_check;

/* Save the current deferred access states and start deferred
   access checking iff DEFER_P is true.  */

void
push_deferring_access_checks (deferring_kind deferring)
{
  /* For context like template instantiation, access checking
     disabling applies to all nested context.  */
  if (deferred_access_no_check || deferring == dk_no_check)
    deferred_access_no_check++;
  else
    {
      deferred_access e = {NULL, deferring};
      vec_safe_push (deferred_access_stack, e);
    }
}

/* Save the current deferred access states and start deferred access
   checking, continuing the set of deferred checks in CHECKS.  */

void
reopen_deferring_access_checks (vec<deferred_access_check, va_gc> * checks)
{
  push_deferring_access_checks (dk_deferred);
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferred_access_checks = checks;
}

/* Resume deferring access checks again after we stopped doing
   this previously.  */

void
resume_deferring_access_checks (void)
{
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferring_access_checks_kind = dk_deferred;
}

/* Stop deferring access checks.  */

void
stop_deferring_access_checks (void)
{
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferring_access_checks_kind = dk_no_deferred;
}

/* Discard the current deferred access checks and restore the
   previous states.  */

void
pop_deferring_access_checks (void)
{
  if (deferred_access_no_check)
    deferred_access_no_check--;
  else
    deferred_access_stack->pop ();
}

/* Returns a TREE_LIST representing the deferred checks.  The
   TREE_PURPOSE of each node is the type through which the
   access occurred; the TREE_VALUE is the declaration named.  */

vec<deferred_access_check, va_gc> *
get_deferred_access_checks (void)
{
  if (deferred_access_no_check)
    return NULL;
  else
    return (deferred_access_stack->last().deferred_access_checks);
}

/* Take current deferred checks and combine with the
   previous states if we also defer checks previously.
   Otherwise perform checks now.  */

void
pop_to_parent_deferring_access_checks (void)
{
  if (deferred_access_no_check)
    deferred_access_no_check--;
  else
    {
      vec<deferred_access_check, va_gc> *checks;
      deferred_access *ptr;

      checks = (deferred_access_stack->last ().deferred_access_checks);

      deferred_access_stack->pop ();
      ptr = &deferred_access_stack->last ();
      if (ptr->deferring_access_checks_kind == dk_no_deferred)
	{
	  /* Check access.  */
	  perform_access_checks (checks, tf_warning_or_error);
	}
      else
	{
	  /* Merge with parent.  */
	  int i, j;
	  deferred_access_check *chk, *probe;

	  FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
	    {
	      /* Skip checks the parent already has recorded.  */
	      FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, j, probe)
		{
		  if (probe->binfo == chk->binfo
		      && probe->decl == chk->decl
		      && probe->diag_decl == chk->diag_decl)
		    goto found;
		}
	      /* Insert into parent's checks.  */
	      vec_safe_push (ptr->deferred_access_checks, *chk);
	    found:;
	    }
	}
    }
}

/* Perform the access checks in CHECKS.  The TREE_PURPOSE of each node
   is the BINFO indicating the qualifying scope used to access the
   DECL node stored in the TREE_VALUE of the node.  If CHECKS is empty
   or we aren't in SFINAE context or all the checks succeed return TRUE,
   otherwise FALSE.  */

bool
perform_access_checks (vec<deferred_access_check, va_gc> *checks,
		       tsubst_flags_t complain)
{
  int i;
  deferred_access_check *chk;
  location_t loc = input_location;
  bool ok = true;

  if (!checks)
    return true;

  FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
    {
      /* Point diagnostics at the location where the name was used;
	 the original input_location is restored below.  */
      input_location = chk->loc;
      ok &= enforce_access (chk->binfo, chk->decl, chk->diag_decl, complain);
    }
  input_location = loc;
  return (complain & tf_error) ? true : ok;
}

/* Perform the deferred access checks.

   After performing the checks, we still have to keep the list
   `deferred_access_stack->deferred_access_checks' since we may want
   to check access for them again later in a different context.
   For example:

     class A {
       typedef int X;
       static X a;
     };
     A::X A::a, x;	// No error for `A::a', error for `x'

   We have to perform deferred access of `A::X', first with `A::a',
   next with `x'.  Return value like perform_access_checks above.  */

bool
perform_deferred_access_checks (tsubst_flags_t complain)
{
  return perform_access_checks (get_deferred_access_checks (), complain);
}

/* Defer checking the accessibility of DECL, when looked up in
   BINFO.  DIAG_DECL is the declaration to use to print diagnostics.
   Return value like perform_access_checks above.
   If non-NULL, report failures to AFI.  */

bool
perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl,
			       tsubst_flags_t complain,
			       access_failure_info *afi)
{
  int i;
  deferred_access *ptr;
  deferred_access_check *chk;

  /* Exit if we are in a context that no access checking is
     performed.  */
  if (deferred_access_no_check)
    return true;

  gcc_assert (TREE_CODE (binfo) == TREE_BINFO);

  ptr = &deferred_access_stack->last ();

  /* If we are not supposed to defer access checks, just check now.  */
  if (ptr->deferring_access_checks_kind == dk_no_deferred)
    {
      bool ok = enforce_access (binfo, decl, diag_decl, complain, afi);
      return (complain & tf_error) ? true : ok;
    }

  /* See if we are already going to perform this check.  */
  FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, i, chk)
    {
      if (chk->decl == decl
	  && chk->binfo == binfo
	  && chk->diag_decl == diag_decl)
	{
	  return true;
	}
    }
  /* If not, record the check.  */
  deferred_access_check new_access = {binfo, decl, diag_decl, input_location};
  vec_safe_push (ptr->deferred_access_checks, new_access);

  return true;
}

/* Returns nonzero if the current statement is a full expression,
   i.e. temporaries created during that statement should be destroyed
   at the end of the statement.  */

int
stmts_are_full_exprs_p (void)
{
  return current_stmt_tree ()->stmts_are_full_exprs_p;
}

/* T is a statement.  Add it to the statement-tree.  This is the C++
   version.  The C/ObjC frontends have a slightly different version of
   this function.  */

tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);

  if (EXPR_P (t) && code != LABEL_EXPR)
    {
      if (!EXPR_HAS_LOCATION (t))
	SET_EXPR_LOCATION (t, input_location);

      /* When we expand a statement-tree, we must know whether or not the
	 statements are full-expressions.  We record that fact here.  */
      STMT_IS_FULL_EXPR_P (t) = stmts_are_full_exprs_p ();
    }

  if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;

  /* Add T to the statement-tree.  Non-side-effect statements need to be
     recorded during statement expressions.  */
  gcc_checking_assert (!stmt_list_stack->is_empty ());
  append_to_statement_list_force (t, &cur_stmt_list);

  return t;
}

/* Returns the stmt_tree to which statements are currently being added.  */

stmt_tree
current_stmt_tree (void)
{
  return (cfun
	  ? &cfun->language->base.x_stmt_tree
	  : &scope_chain->x_stmt_tree);
}

/* If statements are full expressions, wrap STMT in a CLEANUP_POINT_EXPR.
 */

static tree
maybe_cleanup_point_expr (tree expr)
{
  if (!processing_template_decl && stmts_are_full_exprs_p ())
    expr = fold_build_cleanup_point_expr (TREE_TYPE (expr), expr);
  return expr;
}

/* Like maybe_cleanup_point_expr except have the type of the new expression be
   void so we don't need to create a temporary variable to hold the inner
   expression.  The reason why we do this is because the original type might be
   an aggregate and we cannot create a temporary variable for that type.  */

tree
maybe_cleanup_point_expr_void (tree expr)
{
  if (!processing_template_decl && stmts_are_full_exprs_p ())
    expr = fold_build_cleanup_point_expr (void_type_node, expr);
  return expr;
}

/* Create a declaration statement for the declaration given by the DECL.  */

void
add_decl_expr (tree decl)
{
  tree r = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
  /* Only wrap in a cleanup point when the initializer (or a
     side-effecting variable size) can create temporaries.  */
  if (DECL_INITIAL (decl)
      || (DECL_SIZE (decl) && TREE_SIDE_EFFECTS (DECL_SIZE (decl))))
    r = maybe_cleanup_point_expr_void (r);
  add_stmt (r);
}

/* Finish a scope.  */

tree
do_poplevel (tree stmt_list)
{
  tree block = NULL;

  if (stmts_are_full_exprs_p ())
    block = poplevel (kept_level_p (), 1, 0);

  stmt_list = pop_stmt_list (stmt_list);

  if (!processing_template_decl)
    {
      stmt_list = c_build_bind_expr (input_location, block, stmt_list);
      /* ??? See c_end_compound_stmt re statement expressions.  */
    }

  return stmt_list;
}

/* Begin a new scope.  */

static tree
do_pushlevel (scope_kind sk)
{
  tree ret = push_stmt_list ();
  if (stmts_are_full_exprs_p ())
    begin_scope (sk, NULL);
  return ret;
}

/* Queue a cleanup.  CLEANUP is an expression/statement to be executed
   when the current scope is exited.  EH_ONLY is true when this is not
   meant to apply to normal control flow transfer.  */

void
push_cleanup (tree decl, tree cleanup, bool eh_only)
{
  tree stmt = build_stmt (input_location, CLEANUP_STMT, NULL, cleanup, decl);
  CLEANUP_EH_ONLY (stmt) = eh_only;
  add_stmt (stmt);
  /* The statements protected by the cleanup are collected into
     CLEANUP_BODY from here on.  */
  CLEANUP_BODY (stmt) = push_stmt_list ();
}

/* Simple infinite loop tracking for -Wreturn-type.  We keep a stack of all
   the current loops, represented by 'NULL_TREE' if we've seen a possible
   exit, and 'error_mark_node' if not.  This is currently used only to
   suppress the warning about a function with no return statements, and
   therefore we don't bother noting returns as possible exits.  We also
   don't bother with gotos.  */

static void
begin_maybe_infinite_loop (tree cond)
{
  /* Only track this while parsing a function, not during instantiation.  */
  if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl)
		&& !processing_template_decl))
    return;
  bool maybe_infinite = true;
  if (cond)
    {
      /* A constant nonzero condition ("while (1)") means no exit here.  */
      cond = fold_non_dependent_expr (cond);
      maybe_infinite = integer_nonzerop (cond);
    }
  vec_safe_push (cp_function_chain->infinite_loops,
		 maybe_infinite ? error_mark_node : NULL_TREE);
}

/* A break is a possible exit for the current loop.  */

void
break_maybe_infinite_loop (void)
{
  if (!cfun)
    return;
  cp_function_chain->infinite_loops->last() = NULL_TREE;
}

/* If we reach the end of the loop without seeing a possible exit, we have
   an infinite loop.  */

static void
end_maybe_infinite_loop (tree cond)
{
  if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl)
		&& !processing_template_decl))
    return;
  tree current = cp_function_chain->infinite_loops->pop();
  if (current != NULL_TREE)
    {
      cond = fold_non_dependent_expr (cond);
      if (integer_nonzerop (cond))
	current_function_infinite_loop = 1;
    }
}

/* Begin a conditional that might contain a declaration.  When generating
   normal code, we want the declaration to appear before the statement
   containing the conditional.  When generating template code, we want the
   conditional to be rendered as the raw DECL_EXPR.  */

static void
begin_cond (tree *cond_p)
{
  if (processing_template_decl)
    *cond_p = push_stmt_list ();
}

/* Finish such a conditional.  */

static void
finish_cond (tree *cond_p, tree expr)
{
  if (processing_template_decl)
    {
      tree cond = pop_stmt_list (*cond_p);

      if (expr == NULL_TREE)
	/* Empty condition in 'for'.  */
	gcc_assert (empty_expr_stmt_p (cond));
      else if (check_for_bare_parameter_packs (expr))
	expr = error_mark_node;
      else if (!empty_expr_stmt_p (cond))
	expr = build2 (COMPOUND_EXPR, TREE_TYPE (expr), cond, expr);
    }
  *cond_p = expr;
}

/* If *COND_P specifies a conditional with a declaration, transform the
   loop such that
     while (A x = 42) { }
     for (; A x = 42;) { }
   becomes
     while (true) { A x = 42; if (!x) break; }
     for (;;) { A x = 42; if (!x) break; }
   The statement list for BODY will be empty if the conditional did
   not declare anything.  */

static void
simplify_loop_decl_cond (tree *cond_p, tree body)
{
  tree cond, if_stmt;

  if (!TREE_SIDE_EFFECTS (body))
    return;

  cond = *cond_p;
  *cond_p = boolean_true_node;

  if_stmt = begin_if_stmt ();
  cond = cp_build_unary_op (TRUTH_NOT_EXPR, cond, false, tf_warning_or_error);
  finish_if_stmt_cond (cond, if_stmt);
  finish_break_stmt ();
  finish_then_clause (if_stmt);
  finish_if_stmt (if_stmt);
}

/* Finish a goto-statement.  */

tree
finish_goto_stmt (tree destination)
{
  if (identifier_p (destination))
    destination = lookup_label (destination);

  /* We warn about unused labels with -Wunused.  That means we have to
     mark the used labels as used.  */
  if (TREE_CODE (destination) == LABEL_DECL)
    TREE_USED (destination) = 1;
  else
    {
      /* Computed goto: the destination is an address expression.  */
      destination = mark_rvalue_use (destination);
      if (!processing_template_decl)
	{
	  destination = cp_convert (ptr_type_node, destination,
				    tf_warning_or_error);
	  if (error_operand_p (destination))
	    return NULL_TREE;
	  destination
	    = fold_build_cleanup_point_expr (TREE_TYPE (destination),
					     destination);
	}
    }

  check_goto (destination);

  add_stmt (build_predict_expr (PRED_GOTO, NOT_TAKEN));
  return add_stmt (build_stmt (input_location, GOTO_EXPR, destination));
}

/* COND is the condition-expression for an if, while, etc.,
   statement.  Convert it to a boolean value, if appropriate.
   In addition, verify sequence points if -Wsequence-point is enabled.  */

static tree
maybe_convert_cond (tree cond)
{
  /* Empty conditions remain empty.  */
  if (!cond)
    return NULL_TREE;

  /* Wait until we instantiate templates before doing conversion.  */
  if (processing_template_decl)
    return cond;

  if (warn_sequence_point)
    verify_sequence_points (cond);

  /* Do the conversion.  */
  cond = convert_from_reference (cond);

  if (TREE_CODE (cond) == MODIFY_EXPR
      && !TREE_NO_WARNING (cond)
      && warn_parentheses)
    {
      warning_at (EXPR_LOC_OR_LOC (cond, input_location), OPT_Wparentheses,
		  "suggest parentheses around assignment used as truth value");
      TREE_NO_WARNING (cond) = 1;
    }

  return condition_conversion (cond);
}

/* Finish an expression-statement, whose EXPRESSION is as indicated.  */

tree
finish_expr_stmt (tree expr)
{
  tree r = NULL_TREE;
  location_t loc = EXPR_LOCATION (expr);

  if (expr != NULL_TREE)
    {
      /* If we ran into a problem, make sure we complained.  */
      gcc_assert (expr != error_mark_node || seen_error ());

      if (!processing_template_decl)
	{
	  if (warn_sequence_point)
	    verify_sequence_points (expr);
	  expr = convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error);
	}
      else if (!type_dependent_expression_p (expr))
	convert_to_void (build_non_dependent_expr (expr), ICV_STATEMENT,
			 tf_warning_or_error);

      if (check_for_bare_parameter_packs (expr))
	expr = error_mark_node;

      /* Simplification of inner statement expressions, compound exprs,
	 etc can result in us already having an EXPR_STMT.  */
      if (TREE_CODE (expr) != CLEANUP_POINT_EXPR)
	{
	  if (TREE_CODE (expr) != EXPR_STMT)
	    expr = build_stmt (loc, EXPR_STMT, expr);
	  expr = maybe_cleanup_point_expr_void (expr);
	}

      r = add_stmt (expr);
    }

  return r;
}

/* Begin an if-statement.  Returns a newly created IF_STMT if
   appropriate.  */

tree
begin_if_stmt (void)
{
  tree r, scope;
  scope = do_pushlevel (sk_cond);
  r = build_stmt (input_location, IF_STMT, NULL_TREE,
		  NULL_TREE, NULL_TREE, scope);
  current_binding_level->this_entity = r;
  begin_cond (&IF_COND (r));
  return r;
}

/* Process the COND of an if-statement, which may be given by
   IF_STMT.  */

tree
finish_if_stmt_cond (tree cond, tree if_stmt)
{
  cond = maybe_convert_cond (cond);
  /* For `if constexpr', evaluate the condition now when possible.  */
  if (IF_STMT_CONSTEXPR_P (if_stmt)
      && !type_dependent_expression_p (cond)
      && require_constant_expression (cond)
      && !instantiation_dependent_expression_p (cond)
      /* Wait until instantiation time, since only then COND has been
	 converted to bool.  */
      && TREE_TYPE (cond) == boolean_type_node)
    {
      cond = instantiate_non_dependent_expr (cond);
      cond = cxx_constant_value (cond, NULL_TREE);
    }
  finish_cond (&IF_COND (if_stmt), cond);
  add_stmt (if_stmt);
  THEN_CLAUSE (if_stmt) = push_stmt_list ();
  return cond;
}

/* Finish the then-clause of an if-statement, which may be given by
   IF_STMT.  */

tree
finish_then_clause (tree if_stmt)
{
  THEN_CLAUSE (if_stmt) = pop_stmt_list (THEN_CLAUSE (if_stmt));
  return if_stmt;
}

/* Begin the else-clause of an if-statement.  */

void
begin_else_clause (tree if_stmt)
{
  ELSE_CLAUSE (if_stmt) = push_stmt_list ();
}

/* Finish the else-clause of an if-statement, which may be given by
   IF_STMT.  */

void
finish_else_clause (tree if_stmt)
{
  ELSE_CLAUSE (if_stmt) = pop_stmt_list (ELSE_CLAUSE (if_stmt));
}

/* Finish an if-statement.  */

void
finish_if_stmt (tree if_stmt)
{
  tree scope = IF_SCOPE (if_stmt);
  IF_SCOPE (if_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}

/* Begin a while-statement.  Returns a newly created WHILE_STMT if
   appropriate.  */

tree
begin_while_stmt (void)
{
  tree r;
  r = build_stmt (input_location, WHILE_STMT, NULL_TREE, NULL_TREE);
  add_stmt (r);
  WHILE_BODY (r) = do_pushlevel (sk_block);
  begin_cond (&WHILE_COND (r));
  return r;
}

/* Process the COND of a while-statement, which may be given by
   WHILE_STMT.  IVDEP/UNROLL come from #pragma GCC ivdep / unroll and
   are recorded as ANNOTATE_EXPR wrappers around the condition.  */

void
finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep,
			unsigned short unroll)
{
  cond = maybe_convert_cond (cond);
  finish_cond (&WHILE_COND (while_stmt), cond);
  begin_maybe_infinite_loop (cond);
  if (ivdep && cond != error_mark_node)
    WHILE_COND (while_stmt) = build3 (ANNOTATE_EXPR,
				      TREE_TYPE (WHILE_COND (while_stmt)),
				      WHILE_COND (while_stmt),
				      build_int_cst (integer_type_node,
						     annot_expr_ivdep_kind),
				      integer_zero_node);
  if (unroll && cond != error_mark_node)
    WHILE_COND (while_stmt) = build3 (ANNOTATE_EXPR,
				      TREE_TYPE (WHILE_COND (while_stmt)),
				      WHILE_COND (while_stmt),
				      build_int_cst (integer_type_node,
						     annot_expr_unroll_kind),
				      build_int_cst (integer_type_node,
						     unroll));
  simplify_loop_decl_cond (&WHILE_COND (while_stmt), WHILE_BODY (while_stmt));
}

/* Finish a while-statement, which may be given by WHILE_STMT.  */

void
finish_while_stmt (tree while_stmt)
{
  end_maybe_infinite_loop (boolean_true_node);
  WHILE_BODY (while_stmt) = do_poplevel (WHILE_BODY (while_stmt));
}

/* Begin a do-statement.  Returns a newly created DO_STMT if
   appropriate.
*/

tree
begin_do_stmt (void)
{
  tree r = build_stmt (input_location, DO_STMT, NULL_TREE, NULL_TREE);
  /* The condition of a do-loop is not known yet; treat it as possibly
     infinite until finish_do_stmt sees the condition.  */
  begin_maybe_infinite_loop (boolean_true_node);
  add_stmt (r);
  DO_BODY (r) = push_stmt_list ();
  return r;
}

/* Finish the body of a do-statement, which may be given by DO_STMT.  */

void
finish_do_body (tree do_stmt)
{
  tree body = DO_BODY (do_stmt) = pop_stmt_list (DO_BODY (do_stmt));

  if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_TAIL (body))
    body = STATEMENT_LIST_TAIL (body)->stmt;

  if (IS_EMPTY_STMT (body))
    warning (OPT_Wempty_body,
	     "suggest explicit braces around empty body in %<do%> statement");
}

/* Finish a do-statement, which may be given by DO_STMT, and whose
   COND is as indicated.  When IVDEP or UNROLL is set, the
   corresponding loop pragma is recorded as an ANNOTATE_EXPR wrapped
   around the condition.  */

void
finish_do_stmt (tree cond, tree do_stmt, bool ivdep, unsigned short unroll)
{
  cond = maybe_convert_cond (cond);
  end_maybe_infinite_loop (cond);
  if (ivdep && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node, annot_expr_ivdep_kind),
		   integer_zero_node);
  if (unroll && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node, annot_expr_unroll_kind),
		   build_int_cst (integer_type_node, unroll));
  DO_COND (do_stmt) = cond;
}

/* Finish a return-statement.  The EXPRESSION returned, if any, is as
   indicated.  */

tree
finish_return_stmt (tree expr)
{
  tree r;
  bool no_warning;

  expr = check_return_expr (expr, &no_warning);

  if (error_operand_p (expr)
      || (flag_openmp && !check_omp_return ()))
    {
      /* Suppress -Wreturn-type for this function.  */
      if (warn_return_type)
	TREE_NO_WARNING (current_function_decl) = true;
      return error_mark_node;
    }

  if (!processing_template_decl)
    {
      if (warn_sequence_point)
	verify_sequence_points (expr);

      if (DECL_DESTRUCTOR_P (current_function_decl)
	  || (DECL_CONSTRUCTOR_P (current_function_decl)
	      && targetm.cxx.cdtor_returns_this ()))
	{
	  /* Similarly, all destructors must run destructors for
	     base-classes before returning.  So, all returns in a
	     destructor get sent to the DTOR_LABEL; finish_function
	     emits code to return a value there.  */
	  return finish_goto_stmt (cdtor_label);
	}
    }

  r = build_stmt (input_location, RETURN_EXPR, expr);
  TREE_NO_WARNING (r) |= no_warning;
  r = maybe_cleanup_point_expr_void (r);
  r = add_stmt (r);

  return r;
}

/* Begin the scope of a for-statement or a range-for-statement.
   Both the returned trees are to be used in a call to
   begin_for_stmt or begin_range_for_stmt.  */

tree
begin_for_scope (tree *init)
{
  tree scope = NULL_TREE;
  if (flag_new_for_scope)
    scope = do_pushlevel (sk_for);

  /* In a template the init-statement is collected into a statement
     list; otherwise statements are emitted directly.  */
  if (processing_template_decl)
    *init = push_stmt_list ();
  else
    *init = NULL_TREE;

  return scope;
}

/* Begin a for-statement.  Returns a new FOR_STMT.
   SCOPE and INIT should be the return of begin_for_scope,
   or both NULL_TREE  */

tree
begin_for_stmt (tree scope, tree init)
{
  tree r;

  r = build_stmt (input_location, FOR_STMT, NULL_TREE, NULL_TREE,
		  NULL_TREE, NULL_TREE, NULL_TREE);

  if (scope == NULL_TREE)
    {
      gcc_assert (!init || !flag_new_for_scope);
      if (!init)
	scope = begin_for_scope (&init);
    }

  FOR_INIT_STMT (r) = init;
  FOR_SCOPE (r) = scope;

  return r;
}

/* Finish the init-statement of a for-statement, which may be
   given by FOR_STMT.  */

void
finish_init_stmt (tree for_stmt)
{
  if (processing_template_decl)
    FOR_INIT_STMT (for_stmt) = pop_stmt_list (FOR_INIT_STMT (for_stmt));
  add_stmt (for_stmt);
  FOR_BODY (for_stmt) = do_pushlevel (sk_block);
  begin_cond (&FOR_COND (for_stmt));
}

/* Finish the COND of a for-statement, which may be given by
   FOR_STMT.
*/

void
finish_for_cond (tree cond, tree for_stmt, bool ivdep, unsigned short unroll)
{
  cond = maybe_convert_cond (cond);
  finish_cond (&FOR_COND (for_stmt), cond);
  begin_maybe_infinite_loop (cond);
  /* When IVDEP or UNROLL is set, the corresponding loop pragma is
     recorded as an ANNOTATE_EXPR wrapped around the condition.  */
  if (ivdep && cond != error_mark_node)
    FOR_COND (for_stmt) = build3 (ANNOTATE_EXPR,
				  TREE_TYPE (FOR_COND (for_stmt)),
				  FOR_COND (for_stmt),
				  build_int_cst (integer_type_node,
						 annot_expr_ivdep_kind),
				  integer_zero_node);
  if (unroll && cond != error_mark_node)
    FOR_COND (for_stmt) = build3 (ANNOTATE_EXPR,
				  TREE_TYPE (FOR_COND (for_stmt)),
				  FOR_COND (for_stmt),
				  build_int_cst (integer_type_node,
						 annot_expr_unroll_kind),
				  build_int_cst (integer_type_node,
						 unroll));
  simplify_loop_decl_cond (&FOR_COND (for_stmt), FOR_BODY (for_stmt));
}

/* Finish the increment-EXPRESSION in a for-statement, which may be
   given by FOR_STMT.  */

void
finish_for_expr (tree expr, tree for_stmt)
{
  if (!expr)
    return;
  /* If EXPR is an overloaded function, issue an error; there is no
     context available to use to perform overload resolution.  */
  if (type_unknown_p (expr))
    {
      cxx_incomplete_type_error (expr, TREE_TYPE (expr));
      expr = error_mark_node;
    }
  if (!processing_template_decl)
    {
      if (warn_sequence_point)
	verify_sequence_points (expr);
      expr = convert_to_void (expr, ICV_THIRD_IN_FOR,
			      tf_warning_or_error);
    }
  else if (!type_dependent_expression_p (expr))
    convert_to_void (build_non_dependent_expr (expr), ICV_THIRD_IN_FOR,
		     tf_warning_or_error);
  expr = maybe_cleanup_point_expr_void (expr);
  if (check_for_bare_parameter_packs (expr))
    expr = error_mark_node;
  FOR_EXPR (for_stmt) = expr;
}

/* Finish the body of a for-statement, which may be given by
   FOR_STMT.  The increment-EXPR for the loop must be
   provided.
   It can also finish RANGE_FOR_STMT.
*/

void
finish_for_stmt (tree for_stmt)
{
  end_maybe_infinite_loop (boolean_true_node);

  /* This function also serves RANGE_FOR_STMT, which stores its body
     and scope in different operands.  */
  if (TREE_CODE (for_stmt) == RANGE_FOR_STMT)
    RANGE_FOR_BODY (for_stmt) = do_poplevel (RANGE_FOR_BODY (for_stmt));
  else
    FOR_BODY (for_stmt) = do_poplevel (FOR_BODY (for_stmt));

  /* Pop the scope for the body of the loop.  */
  if (flag_new_for_scope)
    {
      tree scope;
      tree *scope_ptr = (TREE_CODE (for_stmt) == RANGE_FOR_STMT
			 ? &RANGE_FOR_SCOPE (for_stmt)
			 : &FOR_SCOPE (for_stmt));
      scope = *scope_ptr;
      *scope_ptr = NULL;
      add_stmt (do_poplevel (scope));
    }
}

/* Begin a range-for-statement.  Returns a new RANGE_FOR_STMT.
   SCOPE and INIT should be the return of begin_for_scope,
   or both NULL_TREE  .
   To finish it call finish_for_stmt().  */

tree
begin_range_for_stmt (tree scope, tree init)
{
  tree r;

  begin_maybe_infinite_loop (boolean_false_node);

  r = build_stmt (input_location, RANGE_FOR_STMT,
		  NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE);

  if (scope == NULL_TREE)
    {
      gcc_assert (!init || !flag_new_for_scope);
      if (!init)
	scope = begin_for_scope (&init);
    }

  /* RANGE_FOR_STMTs do not use nor save the init tree, so we
     pop it now.  */
  if (init)
    pop_stmt_list (init);
  RANGE_FOR_SCOPE (r) = scope;

  return r;
}

/* Finish the head of a range-based for statement, which may
   be given by RANGE_FOR_STMT.  DECL must be the declaration
   and EXPR must be the loop expression.  */

void
finish_range_for_decl (tree range_for_stmt, tree decl, tree expr)
{
  RANGE_FOR_DECL (range_for_stmt) = decl;
  RANGE_FOR_EXPR (range_for_stmt) = expr;
  add_stmt (range_for_stmt);
  /* The body gets its own binding level, popped by finish_for_stmt.  */
  RANGE_FOR_BODY (range_for_stmt) = do_pushlevel (sk_block);
}

/* Finish a break-statement.  */

tree
finish_break_stmt (void)
{
  /* In switch statements break is sometimes stylistically used after
     a return statement.  This can lead to spurious warnings about
     control reaching the end of a non-void function when it is
     inlined.  Note that we are calling block_may_fallthru with
     language specific tree nodes; this works because
     block_may_fallthru returns true when given something it does not
     understand.  */
  if (!block_may_fallthru (cur_stmt_list))
    return void_node;
  note_break_stmt ();
  return add_stmt (build_stmt (input_location, BREAK_STMT));
}

/* Finish a continue-statement.  */

tree
finish_continue_stmt (void)
{
  return add_stmt (build_stmt (input_location, CONTINUE_STMT));
}

/* Begin a switch-statement.  Returns a new SWITCH_STMT if
   appropriate.  */

tree
begin_switch_stmt (void)
{
  tree r, scope;

  /* Open a binding level for the statement; it is recorded as the
     SWITCH_STMT's scope operand and popped by finish_switch_stmt.  */
  scope = do_pushlevel (sk_cond);
  r = build_stmt (input_location, SWITCH_STMT,
		  NULL_TREE, NULL_TREE, NULL_TREE, scope);

  begin_cond (&SWITCH_STMT_COND (r));

  return r;
}

/* Finish the cond of a switch-statement.  */

void
finish_switch_cond (tree cond, tree switch_stmt)
{
  tree orig_type = NULL;

  if (!processing_template_decl)
    {
      /* Convert the condition to an integer or enumeration type.  */
      cond = build_expr_type_conversion (WANT_INT | WANT_ENUM, cond, true);
      if (cond == NULL_TREE)
	{
	  error ("switch quantity not an integer");
	  cond = error_mark_node;
	}
      /* We want unlowered type here to handle enum bit-fields.  */
      orig_type = unlowered_expr_type (cond);
      if (TREE_CODE (orig_type) != ENUMERAL_TYPE)
	orig_type = TREE_TYPE (cond);
      if (cond != error_mark_node)
	{
	  /* [stmt.switch]

	     Integral promotions are performed.  */
	  cond = perform_integral_promotions (cond);
	  cond = maybe_cleanup_point_expr (cond);
	}
    }
  if (check_for_bare_parameter_packs (cond))
    cond = error_mark_node;
  else if (!processing_template_decl && warn_sequence_point)
    verify_sequence_points (cond);

  finish_cond (&SWITCH_STMT_COND (switch_stmt), cond);
  /* The pre-promotion type is saved so case labels can be checked
     against the original (possibly enum) type.  */
  SWITCH_STMT_TYPE (switch_stmt) = orig_type;
  add_stmt (switch_stmt);
  push_switch (switch_stmt);
  SWITCH_STMT_BODY (switch_stmt) = push_stmt_list ();
}

/* Finish the body of a switch-statement, which may be given by
   SWITCH_STMT.  The COND to switch on is indicated.
*/

void
finish_switch_stmt (tree switch_stmt)
{
  tree scope;

  /* Close the body's statement list and leave the switch context
     entered by finish_switch_cond, then pop and emit the scope.  */
  SWITCH_STMT_BODY (switch_stmt) =
    pop_stmt_list (SWITCH_STMT_BODY (switch_stmt));
  pop_switch ();

  scope = SWITCH_STMT_SCOPE (switch_stmt);
  SWITCH_STMT_SCOPE (switch_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}

/* Begin a try-block.  Returns a newly-created TRY_BLOCK if
   appropriate.  */

tree
begin_try_block (void)
{
  tree r = build_stmt (input_location, TRY_BLOCK, NULL_TREE, NULL_TREE);
  add_stmt (r);
  TRY_STMTS (r) = push_stmt_list ();
  return r;
}

/* Likewise, for a function-try-block.  The block returned in
   *COMPOUND_STMT is an artificial outer scope, containing the
   function-try-block.  */

tree
begin_function_try_block (tree *compound_stmt)
{
  tree r;
  /* This outer scope does not exist in the C++ standard, but we need
     a place to put __FUNCTION__ and similar variables.  */
  *compound_stmt = begin_compound_stmt (0);
  r = begin_try_block ();
  FN_TRY_BLOCK_P (r) = 1;
  return r;
}

/* Finish a try-block, which may be given by TRY_BLOCK.  */

void
finish_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
  /* Start collecting the handlers that follow.  */
  TRY_HANDLERS (try_block) = push_stmt_list ();
}

/* Finish the body of a cleanup try-block, which may be given by
   TRY_BLOCK.  */

void
finish_cleanup_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
}

/* Finish an implicitly generated try-block, with a cleanup is given
   by CLEANUP.  */

void
finish_cleanup (tree cleanup, tree try_block)
{
  TRY_HANDLERS (try_block) = cleanup;
  CLEANUP_P (try_block) = 1;
}

/* Likewise, for a function-try-block.  */

void
finish_function_try_block (tree try_block)
{
  finish_try_block (try_block);
  /* FIXME : something queer about CTOR_INITIALIZER somehow following
     the try block, but moving it inside.  */
  in_function_try_handler = 1;
}

/* Finish a handler-sequence for a try-block, which may be given by
   TRY_BLOCK.
*/

void
finish_handler_sequence (tree try_block)
{
  TRY_HANDLERS (try_block) = pop_stmt_list (TRY_HANDLERS (try_block));
  check_handlers (TRY_HANDLERS (try_block));
}

/* Finish the handler-seq for a function-try-block, given by
   TRY_BLOCK.  COMPOUND_STMT is the outer block created by
   begin_function_try_block.  */

void
finish_function_handler_sequence (tree try_block, tree compound_stmt)
{
  in_function_try_handler = 0;
  finish_handler_sequence (try_block);
  finish_compound_stmt (compound_stmt);
}

/* Begin a handler.  Returns a HANDLER if appropriate.  */

tree
begin_handler (void)
{
  tree r;

  r = build_stmt (input_location, HANDLER, NULL_TREE, NULL_TREE);
  add_stmt (r);

  /* Create a binding level for the eh_info and the exception object
     cleanup.  */
  HANDLER_BODY (r) = do_pushlevel (sk_catch);

  return r;
}

/* Finish the handler-parameters for a handler, which may be given by
   HANDLER.  DECL is the declaration for the catch parameter, or NULL
   if this is a `catch (...)' clause.  In a template only the
   parameter is recorded; otherwise the runtime catch-block setup is
   expanded and -Wcatch-value diagnostics are issued for by-value
   catches.  */

void
finish_handler_parms (tree decl, tree handler)
{
  tree type = NULL_TREE;
  if (processing_template_decl)
    {
      if (decl)
	{
	  decl = pushdecl (decl);
	  decl = push_template_decl (decl);
	  HANDLER_PARMS (handler) = decl;
	  type = TREE_TYPE (decl);
	}
    }
  else
    {
      type = expand_start_catch_block (decl);
      if (warn_catch_value
	  && type != NULL_TREE
	  && type != error_mark_node
	  && TREE_CODE (TREE_TYPE (decl)) != REFERENCE_TYPE)
	{
	  tree orig_type = TREE_TYPE (decl);
	  if (CLASS_TYPE_P (orig_type))
	    {
	      if (TYPE_POLYMORPHIC_P (orig_type))
		warning (OPT_Wcatch_value_,
			 "catching polymorphic type %q#T by value",
			 orig_type);
	      else if (warn_catch_value > 1)
		warning (OPT_Wcatch_value_,
			 "catching type %q#T by value", orig_type);
	    }
	  else if (warn_catch_value > 2)
	    warning (OPT_Wcatch_value_,
		     "catching non-reference type %q#T", orig_type);
	}
    }
  HANDLER_TYPE (handler) = type;
}

/* Finish a handler, which may be given by HANDLER.  The BLOCKs are
   the return value from the matching call to finish_handler_parms.
*/

void
finish_handler (tree handler)
{
  /* Outside a template, close the runtime catch block opened by
     expand_start_catch_block in finish_handler_parms.  */
  if (!processing_template_decl)
    expand_end_catch_block ();
  HANDLER_BODY (handler) = do_poplevel (HANDLER_BODY (handler));
}

/* Begin a compound statement.  FLAGS contains some bits that control the
   behavior and context.  If BCS_NO_SCOPE is set, the compound statement
   does not define a scope.  If BCS_FN_BODY is set, this is the outermost
   block of a function.  If BCS_TRY_BLOCK is set, this is the block
   created on behalf of a TRY statement.  Returns a token to be passed to
   finish_compound_stmt.  */

tree
begin_compound_stmt (unsigned int flags)
{
  tree r;

  if (flags & BCS_NO_SCOPE)
    {
      r = push_stmt_list ();
      STATEMENT_LIST_NO_SCOPE (r) = 1;

      /* Normally, we try hard to keep the BLOCK for a statement-expression.
	 But, if it's a statement-expression with a scopeless block, there's
	 nothing to keep, and we don't want to accidentally keep a block
	 *inside* the scopeless block.  */
      keep_next_level (false);
    }
  else
    {
      scope_kind sk = sk_block;
      if (flags & BCS_TRY_BLOCK)
	sk = sk_try;
      else if (flags & BCS_TRANSACTION)
	sk = sk_transaction;
      r = do_pushlevel (sk);
    }

  /* When processing a template, we need to remember where the braces were,
     so that we can set up identical scopes when instantiating the template
     later.  BIND_EXPR is a handy candidate for this.
     Note that do_poplevel won't create a BIND_EXPR itself here (and thus
     result in nested BIND_EXPRs), since we don't build BLOCK nodes when
     processing templates.  */
  if (processing_template_decl)
    {
      r = build3 (BIND_EXPR, NULL, NULL, r, NULL);
      BIND_EXPR_TRY_BLOCK (r) = (flags & BCS_TRY_BLOCK) != 0;
      BIND_EXPR_BODY_BLOCK (r) = (flags & BCS_FN_BODY) != 0;
      TREE_SIDE_EFFECTS (r) = 1;
    }

  return r;
}

/* Finish a compound-statement, which is given by STMT.
*/

void
finish_compound_stmt (tree stmt)
{
  if (TREE_CODE (stmt) == BIND_EXPR)
    {
      tree body = do_poplevel (BIND_EXPR_BODY (stmt));
      /* If the STATEMENT_LIST is empty and this BIND_EXPR isn't special,
	 discard the BIND_EXPR so it can be merged with the containing
	 STATEMENT_LIST.  */
      if (TREE_CODE (body) == STATEMENT_LIST
	  && STATEMENT_LIST_HEAD (body) == NULL
	  && !BIND_EXPR_BODY_BLOCK (stmt)
	  && !BIND_EXPR_TRY_BLOCK (stmt))
	stmt = body;
      else
	BIND_EXPR_BODY (stmt) = body;
    }
  else if (STATEMENT_LIST_NO_SCOPE (stmt))
    stmt = pop_stmt_list (stmt);
  else
    {
      /* Destroy any ObjC "super" receivers that may have been
	 created.  */
      objc_clear_super_receiver ();

      stmt = do_poplevel (stmt);
    }

  /* ??? See c_end_compound_stmt wrt statement expressions.  */
  add_stmt (stmt);
}

/* Finish an asm-statement, whose components are a STRING, some
   OUTPUT_OPERANDS, some INPUT_OPERANDS, some CLOBBERS and some
   LABELS.  Also note whether the asm-statement should be
   considered volatile; an asm with no outputs is always treated as
   volatile.  Returns the statement after adding it to the current
   statement list.  */

tree
finish_asm_stmt (int volatile_p, tree string, tree output_operands,
		 tree input_operands, tree clobbers, tree labels)
{
  tree r;
  tree t;
  int ninputs = list_length (input_operands);
  int noutputs = list_length (output_operands);

  if (!processing_template_decl)
    {
      const char *constraint;
      const char **oconstraints;
      bool allows_mem, allows_reg, is_inout;
      tree operand;
      int i;

      oconstraints = XALLOCAVEC (const char *, noutputs);

      string = resolve_asm_operand_names (string, output_operands,
					  input_operands, labels);

      for (i = 0, t = output_operands; t; t = TREE_CHAIN (t), ++i)
	{
	  operand = TREE_VALUE (t);

	  /* ??? Really, this should not be here.  Users should be using a
	     proper lvalue, dammit.  But there's a long history of using
	     casts in the output operands.  In cases like longlong.h, this
	     becomes a primitive form of typechecking -- if the cast can be
	     removed, then the output operand had a type of the proper width;
	     otherwise we'll get an error.  Gross, but ...  */
	  STRIP_NOPS (operand);

	  operand = mark_lvalue_use (operand);

	  if (!lvalue_or_else (operand, lv_asm, tf_warning_or_error))
	    operand = error_mark_node;

	  if (operand != error_mark_node
	      && (TREE_READONLY (operand)
		  || CP_TYPE_CONST_P (TREE_TYPE (operand))
		  /* Functions are not modifiable, even though they are
		     lvalues.  */
		  || TREE_CODE (TREE_TYPE (operand)) == FUNCTION_TYPE
		  || TREE_CODE (TREE_TYPE (operand)) == METHOD_TYPE
		  /* If it's an aggregate and any field is const, then it is
		     effectively const.  */
		  || (CLASS_TYPE_P (TREE_TYPE (operand))
		      && C_TYPE_FIELDS_READONLY (TREE_TYPE (operand)))))
	    cxx_readonly_error (operand, lv_asm);

	  /* Look through COMPOUND_EXPRs and genericize compound
	     lvalues (pre-inc/dec and assignments) so the real operand
	     is what gets marked addressable below.  */
	  tree *op = &operand;
	  while (TREE_CODE (*op) == COMPOUND_EXPR)
	    op = &TREE_OPERAND (*op, 1);
	  switch (TREE_CODE (*op))
	    {
	    case PREINCREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	    case MODIFY_EXPR:
	      *op = genericize_compound_lvalue (*op);
	      op = &TREE_OPERAND (*op, 1);
	      break;
	    default:
	      break;
	    }

	  constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
	  oconstraints[i] = constraint;

	  if (parse_output_constraint (&constraint, i, ninputs, noutputs,
				       &allows_mem, &allows_reg, &is_inout))
	    {
	      /* If the operand is going to end up in memory,
		 mark it addressable.  */
	      if (!allows_reg && !cxx_mark_addressable (*op))
		operand = error_mark_node;
	    }
	  else
	    operand = error_mark_node;

	  TREE_VALUE (t) = operand;
	}

      for (i = 0, t = input_operands; t; ++i, t = TREE_CHAIN (t))
	{
	  constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
	  bool constraint_parsed
	    = parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
				      oconstraints, &allows_mem, &allows_reg);
	  /* If the operand is going to end up in memory, don't call
	     decay_conversion.  */
	  if (constraint_parsed && !allows_reg && allows_mem)
	    operand = mark_lvalue_use (TREE_VALUE (t));
	  else
	    operand = decay_conversion (TREE_VALUE (t), tf_warning_or_error);

	  /* If the type of the operand hasn't been determined (e.g.,
	     because it involves an overloaded function), then issue
	     an error message.  There's no context available to
	     resolve the overloading.  */
	  if (TREE_TYPE (operand) == unknown_type_node)
	    {
	      error ("type of asm operand %qE could not be determined",
		     TREE_VALUE (t));
	      operand = error_mark_node;
	    }

	  if (constraint_parsed)
	    {
	      /* If the operand is going to end up in memory, mark it
		 addressable.  */
	      if (!allows_reg && allows_mem)
		{
		  /* Strip the nops as we allow this case.  FIXME, this really
		     should be rejected or made deprecated.  */
		  STRIP_NOPS (operand);

		  tree *op = &operand;
		  while (TREE_CODE (*op) == COMPOUND_EXPR)
		    op = &TREE_OPERAND (*op, 1);
		  switch (TREE_CODE (*op))
		    {
		    case PREINCREMENT_EXPR:
		    case PREDECREMENT_EXPR:
		    case MODIFY_EXPR:
		      *op = genericize_compound_lvalue (*op);
		      op = &TREE_OPERAND (*op, 1);
		      break;
		    default:
		      break;
		    }

		  if (!cxx_mark_addressable (*op))
		    operand = error_mark_node;
		}
	      else if (!allows_reg && !allows_mem)
		{
		  /* If constraint allows neither register nor memory,
		     try harder to get a constant.  */
		  tree constop = maybe_constant_value (operand);
		  if (TREE_CONSTANT (constop))
		    operand = constop;
		}
	    }
	  else
	    operand = error_mark_node;

	  TREE_VALUE (t) = operand;
	}
    }

  r = build_stmt (input_location, ASM_EXPR, string,
		  output_operands, input_operands,
		  clobbers, labels);
  ASM_VOLATILE_P (r) = volatile_p || noutputs == 0;
  r = maybe_cleanup_point_expr_void (r);
  return add_stmt (r);
}

/* Finish a label with the indicated NAME.  Returns the new label.  */

tree
finish_label_stmt (tree name)
{
  tree decl = define_label (input_location, name);

  if (decl == error_mark_node)
    return error_mark_node;

  add_stmt (build_stmt (input_location, LABEL_EXPR, decl));

  return decl;
}

/* Finish a series of declarations for local labels.  G++ allows users
   to declare "local" labels, i.e., labels with scope.  This extension
   is useful when writing code involving statement-expressions.
*/

void
finish_label_decl (tree name)
{
  /* __label__ is only meaningful inside a function.  */
  if (!at_function_scope_p ())
    {
      error ("__label__ declarations are only allowed in function scopes");
      return;
    }

  add_decl_expr (declare_local_label (name));
}

/* When DECL goes out of scope, make sure that CLEANUP is executed.  */

void
finish_decl_cleanup (tree decl, tree cleanup)
{
  push_cleanup (decl, cleanup, false);
}

/* If the current scope exits with an exception, run CLEANUP.  */

void
finish_eh_cleanup (tree cleanup)
{
  push_cleanup (NULL, cleanup, true);
}

/* The MEM_INITS is a list of mem-initializers, in reverse of the
   order they were written by the user.  Each node is as for
   emit_mem_initializers.  */

void
finish_mem_initializers (tree mem_inits)
{
  /* Reorder the MEM_INITS so that they are in the order they appeared
     in the source program.  */
  mem_inits = nreverse (mem_inits);

  if (processing_template_decl)
    {
      tree mem;

      for (mem = mem_inits; mem; mem = TREE_CHAIN (mem))
	{
	  /* If the TREE_PURPOSE is a TYPE_PACK_EXPANSION, skip the
	     check for bare parameter packs in the TREE_VALUE, because
	     any parameter packs in the TREE_VALUE have already been
	     bound as part of the TREE_PURPOSE.  See
	     make_pack_expansion for more information.  */
	  if (TREE_CODE (TREE_PURPOSE (mem)) != TYPE_PACK_EXPANSION
	      && check_for_bare_parameter_packs (TREE_VALUE (mem)))
	    TREE_VALUE (mem) = error_mark_node;
	}

      add_stmt (build_min_nt_loc (UNKNOWN_LOCATION,
				  CTOR_INITIALIZER, mem_inits));
    }
  else
    emit_mem_initializers (mem_inits);
}

/* Obfuscate EXPR if it looks like an id-expression or member access so
   that the call to finish_decltype in do_auto_deduction will give the
   right result.  */

tree
force_paren_expr (tree expr)
{
  /* This is only needed for decltype(auto) in C++14.  */
  if (cxx_dialect < cxx14)
    return expr;

  /* If we're in unevaluated context, we can't be deducing a
     return/initializer type, so we don't need to mess with this.  */
  if (cp_unevaluated_operand)
    return expr;

  if (!DECL_P (expr) && TREE_CODE (expr) != COMPONENT_REF
      && TREE_CODE (expr) != SCOPE_REF)
    return expr;

  if (TREE_CODE (expr) == COMPONENT_REF
      || TREE_CODE (expr) == SCOPE_REF)
    REF_PARENTHESIZED_P (expr) = true;
  else if (processing_template_decl)
    expr = build1 (PAREN_EXPR, TREE_TYPE (expr), expr);
  else if (VAR_P (expr) && DECL_HARD_REGISTER (expr))
    /* We can't bind a hard register variable to a reference.  */;
  else
    {
      /* For other lvalue/xvalue decls, record the parenthesization by
	 casting to the matching reference type.  */
      cp_lvalue_kind kind = lvalue_kind (expr);
      if ((kind & ~clk_class) != clk_none)
	{
	  tree type = unlowered_expr_type (expr);
	  bool rval = !!(kind & clk_rvalueref);
	  type = cp_build_reference_type (type, rval);
	  /* This inhibits warnings in, eg, cxx_mark_addressable
	     (c++/60955).  */
	  warning_sentinel s (extra_warnings);
	  expr = build_static_cast (type, expr, tf_error);
	  if (expr != error_mark_node)
	    REF_PARENTHESIZED_P (expr) = true;
	}
    }

  return expr;
}

/* If T is an id-expression obfuscated by force_paren_expr, undo the
   obfuscation and return the underlying id-expression.  Otherwise
   return T.  */

tree
maybe_undo_parenthesized_ref (tree t)
{
  if (cxx_dialect < cxx14)
    return t;

  if (INDIRECT_REF_P (t) && REF_PARENTHESIZED_P (t))
    {
      t = TREE_OPERAND (t, 0);
      while (TREE_CODE (t) == NON_LVALUE_EXPR
	     || TREE_CODE (t) == NOP_EXPR)
	t = TREE_OPERAND (t, 0);

      gcc_assert (TREE_CODE (t) == ADDR_EXPR
		  || TREE_CODE (t) == STATIC_CAST_EXPR);
      t = TREE_OPERAND (t, 0);
    }
  else if (TREE_CODE (t) == PAREN_EXPR)
    t = TREE_OPERAND (t, 0);

  return t;
}

/* Finish a parenthesized expression EXPR.  */

cp_expr
finish_parenthesized_expr (cp_expr expr)
{
  if (EXPR_P (expr))
    /* This inhibits warnings in c_common_truthvalue_conversion.  */
    TREE_NO_WARNING (expr) = 1;

  if (TREE_CODE (expr) == OFFSET_REF
      || TREE_CODE (expr) == SCOPE_REF)
    /* [expr.unary.op]/3 The qualified id of a pointer-to-member must not be
       enclosed in parentheses.  */
    PTRMEM_OK_P (expr) = 0;

  if (TREE_CODE (expr) == STRING_CST)
    PAREN_STRING_LITERAL_P (expr) = 1;

  expr = cp_expr (force_paren_expr (expr), expr.get_location ());

  return expr;
}

/* Finish a reference to a non-static data member (DECL) that is not
   preceded by `.' or `->'.  */

tree
finish_non_static_data_member (tree decl, tree object, tree qualifying_scope)
{
  gcc_assert (TREE_CODE (decl) == FIELD_DECL);
  bool try_omp_private = !object && omp_private_member_map;
  tree ret;

  if (!object)
    {
      tree scope = qualifying_scope;
      if (scope == NULL_TREE)
	scope = context_for_name_lookup (decl);
      object = maybe_dummy_object (scope, NULL);
    }

  object = maybe_resolve_dummy (object, true);
  if (object == error_mark_node)
    return error_mark_node;

  /* DR 613/850: Can use non-static data members without an associated
     object in sizeof/decltype/alignof.  */
  if (is_dummy_object (object)
      && cp_unevaluated_operand == 0
      && (!processing_template_decl || !current_class_ref))
    {
      if (current_function_decl
	  && DECL_STATIC_FUNCTION_P (current_function_decl))
	error ("invalid use of member %qD in static member function", decl);
      else
	error ("invalid use of non-static data member %qD", decl);
      inform (DECL_SOURCE_LOCATION (decl), "declared here");

      return error_mark_node;
    }

  if (current_class_ptr)
    TREE_USED (current_class_ptr) = 1;
  if (processing_template_decl && !qualifying_scope)
    {
      tree type = TREE_TYPE (decl);

      if (TREE_CODE (type) == REFERENCE_TYPE)
	/* Quals on the object don't matter.  */;
      else if (PACK_EXPANSION_P (type))
	/* Don't bother trying to represent this.  */
	type = NULL_TREE;
      else
	{
	  /* Set the cv qualifiers.  */
	  int quals = cp_type_quals (TREE_TYPE (object));

	  if (DECL_MUTABLE_P (decl))
	    quals &= ~TYPE_QUAL_CONST;

	  quals |= cp_type_quals (TREE_TYPE (decl));
	  type = cp_build_qualified_type (type, quals);
	}

      ret = (convert_from_reference
	     (build_min (COMPONENT_REF, type, object, decl, NULL_TREE)));
    }
  /* If PROCESSING_TEMPLATE_DECL is nonzero here, then
     QUALIFYING_SCOPE is also non-null.  Wrap this in a SCOPE_REF
     for now.  */
  else if (processing_template_decl)
    ret = build_qualified_name (TREE_TYPE (decl),
				qualifying_scope,
				decl,
				/*template_p=*/false);
  else
    {
      tree access_type = TREE_TYPE (object);

      perform_or_defer_access_check (TYPE_BINFO (access_type), decl,
				     decl, tf_warning_or_error);

      /* If the data member was named `C::M', convert `*this' to `C'
	 first.  */
      if (qualifying_scope)
	{
	  tree binfo = NULL_TREE;
	  object = build_scoped_ref (object, qualifying_scope,
				     &binfo);
	}

      ret = build_class_member_access_expr (object, decl,
					    /*access_path=*/NULL_TREE,
					    /*preserve_reference=*/false,
					    tf_warning_or_error);
    }
  /* Inside an OpenMP region, the member may have been mapped to a
     private copy; prefer that copy when it exists.  */
  if (try_omp_private)
    {
      tree *v = omp_private_member_map->get (decl);
      if (v)
	ret = convert_from_reference (*v);
    }
  return ret;
}

/* If we are currently parsing a template and we encountered a typedef
   TYPEDEF_DECL that is being accessed though CONTEXT, this function
   adds the typedef to a list tied to the current template.
   At template instantiation time, that list is walked and access check
   performed for each typedef.
   LOCATION is the location of the usage point of TYPEDEF_DECL.  */

void
add_typedef_to_current_template_for_access_check (tree typedef_decl,
						  tree context,
						  location_t location)
{
  tree template_info = NULL;
  tree cs = current_scope ();

  if (!is_typedef_decl (typedef_decl)
      || !context
      || !CLASS_TYPE_P (context)
      || !cs)
    return;

  if (CLASS_TYPE_P (cs) || TREE_CODE (cs) == FUNCTION_DECL)
    template_info = get_template_info (cs);

  if (template_info
      && TI_TEMPLATE (template_info)
      && !currently_open_class (context))
    append_type_to_template_for_access_check (cs, typedef_decl,
					      context, location);
}

/* DECL was the declaration to which a qualified-id resolved.  Issue
   an error message if it is not accessible.  If OBJECT_TYPE is
   non-NULL, we have just seen `x->' or `x.' and OBJECT_TYPE is the
   type of `*x', or `x', respectively.  If the DECL was named as
   `A::B' then NESTED_NAME_SPECIFIER is `A'.
*/

void
check_accessibility_of_qualified_id (tree decl,
				     tree object_type,
				     tree nested_name_specifier)
{
  tree scope;
  tree qualifying_type = NULL_TREE;

  /* If we are parsing a template declaration and if decl is a typedef,
     add it to a list tied to the template.
     At template instantiation time, that list will be walked and
     access check performed.  */
  add_typedef_to_current_template_for_access_check (decl,
						    nested_name_specifier
						    ? nested_name_specifier
						    : DECL_CONTEXT (decl),
						    input_location);

  /* If we're not checking, return immediately.  */
  if (deferred_access_no_check)
    return;

  /* Determine the SCOPE of DECL.  */
  scope = context_for_name_lookup (decl);
  /* If the SCOPE is not a type, then DECL is not a member.  */
  if (!TYPE_P (scope))
    return;
  /* Compute the scope through which DECL is being accessed.  */
  if (object_type
      /* OBJECT_TYPE might not be a class type; consider:

	   class A { typedef int I; };
	   I *p;
	   p->A::I::~I();

	 In this case, we will have "A::I" as the DECL, but "I" as the
	 OBJECT_TYPE.  */
      && CLASS_TYPE_P (object_type)
      && DERIVED_FROM_P (scope, object_type))
    /* If we are processing a `->' or `.' expression, use the type of the
       left-hand side.  */
    qualifying_type = object_type;
  else if (nested_name_specifier)
    {
      /* If the reference is to a non-static member of the
	 current class, treat it as if it were referenced through
	 `this'.  */
      tree ct;
      if (DECL_NONSTATIC_MEMBER_P (decl)
	  && current_class_ptr
	  && DERIVED_FROM_P (scope, ct = current_nonlambda_class_type ()))
	qualifying_type = ct;
      /* Otherwise, use the type indicated by the
	 nested-name-specifier.  */
      else
	qualifying_type = nested_name_specifier;
    }
  else
    /* Otherwise, the name must be from the current class or one of
       its bases.  */
    qualifying_type = currently_open_derived_class (scope);

  if (qualifying_type
      /* It is possible for qualifying type to be a TEMPLATE_TYPE_PARM
	 or similar in a default argument value.  */
      && CLASS_TYPE_P (qualifying_type)
      && !dependent_type_p (qualifying_type))
    perform_or_defer_access_check (TYPE_BINFO (qualifying_type), decl,
				   decl, tf_warning_or_error);
}

/* EXPR is the result of a qualified-id.  The QUALIFYING_CLASS was the
   class named to the left of the "::" operator.  DONE is true if this
   expression is a complete postfix-expression; it is false if this
   expression is followed by '->', '[', '(', etc.  ADDRESS_P is true
   iff this expression is the operand of '&'.  TEMPLATE_P is true iff
   the qualified-id was of the form "A::template B".  TEMPLATE_ARG_P
   is true iff this qualified name appears as a template argument.  */

tree
finish_qualified_id_expr (tree qualifying_class,
			  tree expr,
			  bool done,
			  bool address_p,
			  bool template_p,
			  bool template_arg_p,
			  tsubst_flags_t complain)
{
  gcc_assert (TYPE_P (qualifying_class));

  if (error_operand_p (expr))
    return error_mark_node;

  if ((DECL_P (expr) || BASELINK_P (expr))
      && !mark_used (expr, complain))
    return error_mark_node;

  if (template_p)
    {
      if (TREE_CODE (expr) == UNBOUND_CLASS_TEMPLATE)
	{
	  /* cp_parser_lookup_name thought we were looking for a type,
	     but we're actually looking for a declaration.  */
	  qualifying_class = TYPE_CONTEXT (expr);
	  expr = TYPE_IDENTIFIER (expr);
	}
      else
	check_template_keyword (expr);
    }

  /* If EXPR occurs as the operand of '&', use special handling that
     permits a pointer-to-member.  */
  if (address_p && done)
    {
      if (TREE_CODE (expr) == SCOPE_REF)
	expr = TREE_OPERAND (expr, 1);
      expr = build_offset_ref (qualifying_class, expr,
			       /*address_p=*/true, complain);
      return expr;
    }

  /* No need to check access within an enum.  */
  if (TREE_CODE (qualifying_class) == ENUMERAL_TYPE
      && TREE_CODE (expr) != IDENTIFIER_NODE)
    return expr;

  /* Within the scope of a class, turn references to non-static
     members into expression of the form "this->...".  */
  if (template_arg_p)
    /* But, within a template argument, we do not want make the
       transformation, as there is no "this" pointer.  */
    ;
  else if (TREE_CODE (expr) == FIELD_DECL)
    {
      push_deferring_access_checks (dk_no_check);
      expr = finish_non_static_data_member (expr, NULL_TREE,
					    qualifying_class);
      pop_deferring_access_checks ();
    }
  else if (BASELINK_P (expr))
    {
      /* See if any of the functions are non-static members.  */
      /* If so, the expression may be relative to 'this'.  */
      if (!shared_member_p (expr)
	  && current_class_ptr
	  && DERIVED_FROM_P (qualifying_class,
			     current_nonlambda_class_type ()))
	expr = (build_class_member_access_expr
		(maybe_dummy_object (qualifying_class, NULL),
		 expr,
		 BASELINK_ACCESS_BINFO (expr),
		 /*preserve_reference=*/false,
		 complain));
      else if (done)
	/* The expression is a qualified name whose address is not
	   being taken.  */
	expr = build_offset_ref (qualifying_class, expr, /*address_p=*/false,
				 complain);
    }
  else
    {
      /* In a template, return a SCOPE_REF for most qualified-ids
	 so that we can check access at instantiation time.  But if
	 we're looking at a member of the current instantiation, we
	 know we have access and building up the SCOPE_REF confuses
	 non-type template argument handling.  */
      if (processing_template_decl
	  && (!currently_open_class (qualifying_class)
	      || TREE_CODE (expr) == BIT_NOT_EXPR))
	expr = build_qualified_name (TREE_TYPE (expr),
				     qualifying_class, expr,
				     template_p);

      expr = convert_from_reference (expr);
    }

  return expr;
}

/* Begin a statement-expression.  The value returned must be passed to
   finish_stmt_expr.  */

tree
begin_stmt_expr (void)
{
  return push_stmt_list ();
}

/* Process the final expression of a statement expression. EXPR can be
   NULL, if the final expression is empty.  Return a STATEMENT_LIST
   containing all the statements in the statement-expression, or
   ERROR_MARK_NODE if there was an error.  */

tree
finish_stmt_expr_expr (tree expr, tree stmt_expr)
{
  if (error_operand_p (expr))
    {
      /* The type of the statement-expression is the type of the last
	 expression.  */
      TREE_TYPE (stmt_expr) = error_mark_node;
      return error_mark_node;
    }

  /* If the last statement does not have "void" type, then the value
     of the last statement is the value of the entire expression.  */
  if (expr)
    {
      tree type = TREE_TYPE (expr);

      if (type && type_unknown_p (type))
	{
	  error ("a statement expression is an insufficient context"
		 " for overload resolution");
	  TREE_TYPE (stmt_expr) = error_mark_node;
	  return error_mark_node;
	}
      else if (processing_template_decl)
	{
	  expr = build_stmt (input_location, EXPR_STMT, expr);
	  expr = add_stmt (expr);
	  /* Mark the last statement so that we can recognize it as such at
	     template-instantiation time.  */
	  EXPR_STMT_STMT_EXPR_RESULT (expr) = 1;
	}
      else if (VOID_TYPE_P (type))
	{
	  /* Just treat this like an ordinary statement.  */
	  expr = finish_expr_stmt (expr);
	}
      else
	{
	  /* It actually has a value we need to deal with.  First, force it
	     to be an rvalue so that we won't need to build up a copy
	     constructor call later when we try to assign it to something.  */
	  expr = force_rvalue (expr, tf_warning_or_error);
	  if (error_operand_p (expr))
	    return error_mark_node;

	  /* Update for array-to-pointer decay.  */
	  type = TREE_TYPE (expr);

	  /* Wrap it in a CLEANUP_POINT_EXPR and add it to the list like a
	     normal statement, but don't convert to void or actually add
	     the EXPR_STMT.  */
	  if (TREE_CODE (expr) != CLEANUP_POINT_EXPR)
	    expr = maybe_cleanup_point_expr (expr);
	  add_stmt (expr);
	}

      /* The type of the statement-expression is the type of the last
	 expression.  */
      TREE_TYPE (stmt_expr) = type;
    }

  return stmt_expr;
}

/* Finish a statement-expression.  STMT_EXPR should be the value
   returned by the previous begin_stmt_expr.  Returns an expression
   representing the statement-expression.
   */

tree
finish_stmt_expr (tree stmt_expr, bool has_no_scope)
{
  tree type;
  tree result;

  if (error_operand_p (stmt_expr))
    {
      /* Still pop the list pushed by begin_stmt_expr so the
	 statement-list stack stays balanced on error.  */
      pop_stmt_list (stmt_expr);
      return error_mark_node;
    }

  gcc_assert (TREE_CODE (stmt_expr) == STATEMENT_LIST);

  type = TREE_TYPE (stmt_expr);
  result = pop_stmt_list (stmt_expr);
  TREE_TYPE (result) = type;

  if (processing_template_decl)
    {
      result = build_min (STMT_EXPR, type, result);
      TREE_SIDE_EFFECTS (result) = 1;
      STMT_EXPR_NO_SCOPE (result) = has_no_scope;
    }
  else if (CLASS_TYPE_P (type))
    {
      /* Wrap the statement-expression in a TARGET_EXPR so that the
	 temporary object created by the final expression is destroyed at
	 the end of the full-expression containing the
	 statement-expression.  */
      result = force_target_expr (type, result, tf_warning_or_error);
    }

  return result;
}

/* Returns the expression which provides the value of STMT_EXPR.  */

tree
stmt_expr_value_expr (tree stmt_expr)
{
  tree t = STMT_EXPR_STMT (stmt_expr);

  /* Peel away the wrappers added during parsing: an optional
     BIND_EXPR, then the last statement of the list, then the
     EXPR_STMT marking the value-producing expression.  */
  if (TREE_CODE (t) == BIND_EXPR)
    t = BIND_EXPR_BODY (t);

  if (TREE_CODE (t) == STATEMENT_LIST && STATEMENT_LIST_TAIL (t))
    t = STATEMENT_LIST_TAIL (t)->stmt;

  if (TREE_CODE (t) == EXPR_STMT)
    t = EXPR_STMT_EXPR (t);

  return t;
}

/* Return TRUE iff EXPR_STMT is an empty list of
   expression statements.  */

bool
empty_expr_stmt_p (tree expr_stmt)
{
  tree body = NULL_TREE;

  if (expr_stmt == void_node)
    return true;

  if (expr_stmt)
    {
      if (TREE_CODE (expr_stmt) == EXPR_STMT)
	body = EXPR_STMT_EXPR (expr_stmt);
      else if (TREE_CODE (expr_stmt) == STATEMENT_LIST)
	body = expr_stmt;
    }

  if (body)
    {
      if (TREE_CODE (body) == STATEMENT_LIST)
	return tsi_end_p (tsi_start (body));
      else
	/* Recurse: an EXPR_STMT may itself wrap a statement list.  */
	return empty_expr_stmt_p (body);
    }
  return false;
}

/* Perform Koenig lookup.  FN is the postfix-expression representing
   the function (or functions) to call; ARGS are the arguments to the
   call.  Returns the functions to be considered by overload resolution.
   */

cp_expr
perform_koenig_lookup (cp_expr fn, vec<tree, va_gc> *args,
		       tsubst_flags_t complain)
{
  tree identifier = NULL_TREE;
  tree functions = NULL_TREE;
  tree tmpl_args = NULL_TREE;
  bool template_id = false;
  location_t loc = fn.get_location ();

  if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
    {
      /* Use a separate flag to handle null args.  */
      template_id = true;
      tmpl_args = TREE_OPERAND (fn, 1);
      fn = TREE_OPERAND (fn, 0);
    }

  /* Find the name of the overloaded function.  */
  if (identifier_p (fn))
    identifier = fn;
  else
    {
      functions = fn;
      identifier = OVL_NAME (functions);
    }

  /* A call to a namespace-scope function using an unqualified name.

     Do Koenig lookup -- unless any of the arguments are
     type-dependent.  */
  if (!any_type_dependent_arguments_p (args)
      && !any_dependent_template_arguments_p (tmpl_args))
    {
      fn = lookup_arg_dependent (identifier, functions, args);
      if (!fn)
	{
	  /* The unqualified name could not be resolved.  */
	  if (complain & tf_error)
	    fn = unqualified_fn_lookup_error (cp_expr (identifier, loc));
	  else
	    fn = identifier;
	}
    }

  /* If FN was a template-id, rebuild it around the (possibly
     newly looked-up) function so the template arguments survive.  */
  if (fn && template_id && fn != error_mark_node)
    fn = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fn, tmpl_args);

  return fn;
}

/* Generate an expression for `FN (ARGS)'.  This may change the
   contents of ARGS.

   If DISALLOW_VIRTUAL is true, the call to FN will be not generated
   as a virtual call, even if FN is virtual.  (This flag is set when
   encountering an expression where the function name is explicitly
   qualified.  For example a call to `X::f' never generates a virtual
   call.)

   Returns code for the call.  */

tree
finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual,
		  bool koenig_p, tsubst_flags_t complain)
{
  tree result;
  tree orig_fn;
  vec<tree, va_gc> *orig_args = NULL;

  if (fn == error_mark_node)
    return error_mark_node;

  gcc_assert (!TYPE_P (fn));

  /* If FN may be a FUNCTION_DECL obfuscated by force_paren_expr, undo
     it so that we can tell this is a call to a known function.
     */
  fn = maybe_undo_parenthesized_ref (fn);

  orig_fn = fn;

  if (processing_template_decl)
    {
      /* If FN is a local extern declaration or set thereof, look them up
	 again at instantiation time.  */
      if (is_overloaded_fn (fn))
	{
	  tree ifn = get_first_fn (fn);
	  if (TREE_CODE (ifn) == FUNCTION_DECL
	      && DECL_LOCAL_FUNCTION_P (ifn))
	    orig_fn = DECL_NAME (ifn);
	}

      /* If the call expression is dependent, build a CALL_EXPR node
	 with no type; type_dependent_expression_p recognizes
	 expressions with no type as being dependent.  */
      if (type_dependent_expression_p (fn)
	  || any_type_dependent_arguments_p (*args))
	{
	  result = build_min_nt_call_vec (orig_fn, *args);
	  SET_EXPR_LOCATION (result, EXPR_LOC_OR_LOC (fn, input_location));
	  KOENIG_LOOKUP_P (result) = koenig_p;
	  if (is_overloaded_fn (fn))
	    {
	      fn = get_fns (fn);
	      lookup_keep (fn, true);
	    }

	  if (cfun)
	    {
	      /* ABNORMAL stays true only if every candidate is a
		 noreturn (TREE_THIS_VOLATILE) FUNCTION_DECL.  */
	      bool abnormal = true;
	      for (lkp_iterator iter (fn); abnormal && iter; ++iter)
		{
		  tree fndecl = *iter;
		  if (TREE_CODE (fndecl) != FUNCTION_DECL
		      || !TREE_THIS_VOLATILE (fndecl))
		    abnormal = false;
		}
	      /* FIXME: Stop warning about falling off end of non-void
		 function.   But this is wrong.  Even if we only see
		 no-return fns at this point, we could select a
		 future-defined return fn during instantiation.  Or
		 vice-versa.  */
	      if (abnormal)
		current_function_returns_abnormally = 1;
	    }
	  return result;
	}
      orig_args = make_tree_vector_copy (*args);
      if (!BASELINK_P (fn)
	  && TREE_CODE (fn) != PSEUDO_DTOR_EXPR
	  && TREE_TYPE (fn) != unknown_type_node)
	fn = build_non_dependent_expr (fn);
      make_args_non_dependent (*args);
    }

  if (TREE_CODE (fn) == COMPONENT_REF)
    {
      /* obj.fn (...) where FN resolved to a member function set:
	 dispatch straight to the method-call machinery.  */
      tree member = TREE_OPERAND (fn, 1);
      if (BASELINK_P (member))
	{
	  tree object = TREE_OPERAND (fn, 0);
	  return build_new_method_call (object, member,
					args, NULL_TREE,
					(disallow_virtual
					 ? LOOKUP_NORMAL | LOOKUP_NONVIRTUAL
					 : LOOKUP_NORMAL),
					/*fn_p=*/NULL,
					complain);
	}
    }

  /* Per 13.3.1.1, '(&f)(...)' is the same as '(f)(...)'.  */
  if (TREE_CODE (fn) == ADDR_EXPR
      && TREE_CODE (TREE_OPERAND (fn, 0)) == OVERLOAD)
    fn = TREE_OPERAND (fn, 0);

  if (is_overloaded_fn (fn))
    fn = baselink_for_fns (fn);

  result = NULL_TREE;
  if (BASELINK_P (fn))
    {
      tree object;

      /* A call to a member function.  From [over.call.func]:

	   If the keyword this is in scope and refers to the class of
	   that member function, or a derived class thereof, then the
	   function call is transformed into a qualified function call
	   using (*this) as the postfix-expression to the left of the
	   . operator.... [Otherwise] a contrived object of type T
	   becomes the implied object argument.

	In this situation:

	  struct A { void f(); };
	  struct B : public A {};
	  struct C : public A { void g() { B::f(); }};

	"the class of that member function" refers to `A'.  But 11.2
	[class.access.base] says that we need to convert 'this' to B* as
	part of the access, so we pass 'B' to maybe_dummy_object.  */

      if (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (get_first_fn (fn)))
	{
	  /* A constructor call always uses a dummy object.  (This constructor
	     call which has the form A::A () is actually invalid and we are
	     going to reject it later in build_new_method_call.)  */
	  object = build_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)));
	}
      else
	object = maybe_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)),
				     NULL);

      result = build_new_method_call (object, fn, args, NULL_TREE,
				      (disallow_virtual
				       ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL
				       : LOOKUP_NORMAL),
				      /*fn_p=*/NULL,
				      complain);
    }
  else if (is_overloaded_fn (fn))
    {
      /* If the function is an overloaded builtin, resolve it.  */
      if (TREE_CODE (fn) == FUNCTION_DECL
	  && (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
	      || DECL_BUILT_IN_CLASS (fn) == BUILT_IN_MD))
	result = resolve_overloaded_builtin (input_location, fn, *args);

      if (!result)
	{
	  if (warn_sizeof_pointer_memaccess
	      && (complain & tf_warning)
	      && !vec_safe_is_empty (*args)
	      && !processing_template_decl)
	    {
	      /* Only the first three arguments are inspected; that
		 covers the mem*/str* builtins this warning targets.  */
	      location_t sizeof_arg_loc[3];
	      tree sizeof_arg[3];
	      unsigned int i;
	      for (i = 0; i < 3; i++)
		{
		  tree t;

		  sizeof_arg_loc[i] = UNKNOWN_LOCATION;
		  sizeof_arg[i] = NULL_TREE;
		  if (i >= (*args)->length ())
		    continue;
		  t = (**args)[i];
		  if (TREE_CODE (t) != SIZEOF_EXPR)
		    continue;
		  if (SIZEOF_EXPR_TYPE_P (t))
		    sizeof_arg[i] = TREE_TYPE (TREE_OPERAND (t, 0));
		  else
		    sizeof_arg[i] = TREE_OPERAND (t, 0);
		  sizeof_arg_loc[i] = EXPR_LOCATION (t);
		}
	      sizeof_pointer_memaccess_warning
		(sizeof_arg_loc, fn, *args,
		 sizeof_arg, same_type_ignoring_top_level_qualifiers_p);
	    }

	  /* A call to a namespace-scope function.  */
	  result = build_new_function_call (fn, args, complain);
	}
    }
  else if (TREE_CODE (fn) == PSEUDO_DTOR_EXPR)
    {
      if (!vec_safe_is_empty (*args))
	error ("arguments to destructor are not allowed");
      /* Mark the pseudo-destructor call as having side-effects so
	 that we do not issue warnings about its use.  */
      result = build1 (NOP_EXPR,
		       void_type_node,
		       TREE_OPERAND (fn, 0));
      TREE_SIDE_EFFECTS (result) = 1;
    }
  else if (CLASS_TYPE_P (TREE_TYPE (fn)))
    /* If the "function" is really an object of class type, it might
       have an overloaded `operator ()'.  */
    result = build_op_call (fn, args, complain);

  if (!result)
    /* A call where the function is unknown.
     */
    result = cp_build_function_call_vec (fn, args, complain);

  if (processing_template_decl && result != error_mark_node)
    {
      /* Rebuild a CALL_EXPR over the ORIGINAL function and arguments
	 so the call is re-resolved at instantiation time; only the
	 type computed above is kept.  */
      if (INDIRECT_REF_P (result))
	result = TREE_OPERAND (result, 0);
      result = build_call_vec (TREE_TYPE (result), orig_fn, orig_args);
      SET_EXPR_LOCATION (result, input_location);
      KOENIG_LOOKUP_P (result) = koenig_p;
      release_tree_vector (orig_args);
      result = convert_from_reference (result);
    }

  /* Free or retain OVERLOADs from lookup.  */
  if (is_overloaded_fn (orig_fn))
    lookup_keep (get_fns (orig_fn), processing_template_decl);

  return result;
}

/* Finish a call to a postfix increment or decrement or EXPR.  (Which
   is indicated by CODE, which should be POSTINCREMENT_EXPR or
   POSTDECREMENT_EXPR.)  */

cp_expr
finish_increment_expr (cp_expr expr, enum tree_code code)
{
  /* input_location holds the location of the trailing operator token.
     Build a location of the form:
       expr++
       ~~~~^~
     with the caret at the operator token, ranging from the start
     of EXPR to the end of the operator token.  */
  location_t combined_loc = make_location (input_location,
					   expr.get_start (),
					   get_finish (input_location));
  cp_expr result = build_x_unary_op (combined_loc, code, expr,
				     tf_warning_or_error);
  /* TODO: build_x_unary_op doesn't honor the location, so set it here.  */
  result.set_location (combined_loc);
  return result;
}

/* Finish a use of `this'.  Returns an expression for `this'.  */

tree
finish_this_expr (void)
{
  tree result = NULL_TREE;
  if (current_class_ptr)
    {
      tree type = TREE_TYPE (current_class_ref);

      /* In a lambda expression, 'this' refers to the captured 'this'.  */
      if (LAMBDA_TYPE_P (type))
	result = lambda_expr_this_capture (CLASSTYPE_LAMBDA_EXPR (type), true);
      else
	result = current_class_ptr;
    }

  if (result)
    /* The keyword 'this' is a prvalue expression.
     */
    return rvalue (result);

  /* No 'this' is available: diagnose according to context.  */
  tree fn = current_nonlambda_function ();
  if (fn && DECL_STATIC_FUNCTION_P (fn))
    error ("%<this%> is unavailable for static member functions");
  else if (fn)
    error ("invalid use of %<this%> in non-member function");
  else
    error ("invalid use of %<this%> at top level");
  return error_mark_node;
}

/* Finish a pseudo-destructor expression.  If SCOPE is NULL, the
   expression was of the form `OBJECT.~DESTRUCTOR' where DESTRUCTOR is
   the TYPE for the type given.  If SCOPE is non-NULL, the expression
   was of the form `OBJECT.SCOPE::~DESTRUCTOR'.  */

tree
finish_pseudo_destructor_expr (tree object, tree scope, tree destructor,
			       location_t loc)
{
  if (object == error_mark_node || destructor == error_mark_node)
    return error_mark_node;

  gcc_assert (TYPE_P (destructor));

  if (!processing_template_decl)
    {
      if (scope == error_mark_node)
	{
	  error_at (loc, "invalid qualifying scope in pseudo-destructor name");
	  return error_mark_node;
	}
      /* ~auto resolves to the type of the object expression.  */
      if (is_auto (destructor))
	destructor = TREE_TYPE (object);
      if (scope && TYPE_P (scope) && !check_dtor_name (scope, destructor))
	{
	  error_at (loc,
		    "qualified type %qT does not match destructor name ~%qT",
		    scope, destructor);
	  return error_mark_node;
	}

      /* [expr.pseudo] says both:

	   The type designated by the pseudo-destructor-name shall be
	   the same as the object type.

	 and:

	   The cv-unqualified versions of the object type and of the
	   type designated by the pseudo-destructor-name shall be the
	   same type.

	 We implement the more generous second sentence, since that is
	 what most other compilers do.  */
      if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (object),
						      destructor))
	{
	  error_at (loc, "%qE is not of type %qT", object, destructor);
	  return error_mark_node;
	}
    }

  return build3_loc (loc, PSEUDO_DTOR_EXPR, void_type_node, object,
		     scope, destructor);
}

/* Finish an expression of the form CODE EXPR.
   */

cp_expr
finish_unary_op_expr (location_t op_loc, enum tree_code code, cp_expr expr,
		      tsubst_flags_t complain)
{
  /* Build a location of the form:
       ++expr
       ^~~~~~
     with the caret at the operator token, ranging from the start
     of the operator token to the end of EXPR.  */
  location_t combined_loc = make_location (op_loc,
					   op_loc, expr.get_finish ());
  cp_expr result = build_x_unary_op (combined_loc, code, expr, complain);
  /* TODO: build_x_unary_op doesn't always honor the location.  */
  result.set_location (combined_loc);

  tree result_ovl, expr_ovl;

  if (!(complain & tf_warning))
    return result;

  result_ovl = result;
  expr_ovl = expr;

  if (!processing_template_decl)
    expr_ovl = cp_fully_fold (expr_ovl);

  /* Warn about overflow only when the OPERAND folded to a constant
     without overflow but the RESULT overflows, i.e. the overflow was
     introduced by this operator.  */
  if (!CONSTANT_CLASS_P (expr_ovl)
      || TREE_OVERFLOW_P (expr_ovl))
    return result;

  if (!processing_template_decl)
    result_ovl = cp_fully_fold (result_ovl);

  if (CONSTANT_CLASS_P (result_ovl) && TREE_OVERFLOW_P (result_ovl))
    overflow_warning (combined_loc, result_ovl);

  return result;
}

/* Finish a compound-literal expression or C++11 functional cast with
   aggregate initializer.  TYPE is the type to which the CONSTRUCTOR in
   COMPOUND_LITERAL is being cast.  */

tree
finish_compound_literal (tree type, tree compound_literal,
			 tsubst_flags_t complain,
			 fcl_t fcl_context)
{
  if (type == error_mark_node)
    return error_mark_node;

  if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      /* Build the literal of the referenced type, then bind the
	 reference to it via a cast.  */
      compound_literal
	= finish_compound_literal (TREE_TYPE (type), compound_literal,
				   complain, fcl_context);
      return cp_build_c_cast (type, compound_literal, complain);
    }

  if (!TYPE_OBJ_P (type))
    {
      if (complain & tf_error)
	error ("compound literal of non-object type %qT", type);
      return error_mark_node;
    }

  if (tree anode = type_uses_auto (type))
    if (CLASS_PLACEHOLDER_TEMPLATE (anode))
      {
	/* Class template argument deduction from the initializer.  */
	type = do_auto_deduction (type, compound_literal, anode, complain,
				  adc_variable_type);
	if (type == error_mark_node)
	  return error_mark_node;
      }

  if (processing_template_decl)
    {
      TREE_TYPE (compound_literal) = type;
      /* Mark the expression as a compound literal.
       */
      TREE_HAS_CONSTRUCTOR (compound_literal) = 1;
      if (fcl_context == fcl_c99)
	CONSTRUCTOR_C99_COMPOUND_LITERAL (compound_literal) = 1;
      return compound_literal;
    }

  type = complete_type (type);

  if (TYPE_NON_AGGREGATE_CLASS (type))
    {
      /* Trying to deal with a CONSTRUCTOR instead of a TREE_LIST
	 everywhere that deals with function arguments would be a pain, so
	 just wrap it in a TREE_LIST.  The parser set a flag so we know
	 that it came from T{} rather than T({}).  */
      CONSTRUCTOR_IS_DIRECT_INIT (compound_literal) = 1;
      compound_literal = build_tree_list (NULL_TREE, compound_literal);
      return build_functional_cast (type, compound_literal, complain);
    }

  if (TREE_CODE (type) == ARRAY_TYPE
      && check_array_initializer (NULL_TREE, type, compound_literal))
    return error_mark_node;
  compound_literal = reshape_init (type, compound_literal, complain);
  /* For a scalar target, a single unbraced initializer must not
     narrow.  */
  if (SCALAR_TYPE_P (type)
      && !BRACE_ENCLOSED_INITIALIZER_P (compound_literal)
      && !check_narrowing (type, compound_literal, complain))
    return error_mark_node;
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE)
    {
      /* Deduce the array bound from the initializer (T[]{...}).  */
      cp_complete_array_type_or_error (&type, compound_literal,
				       false, complain);
      if (type == error_mark_node)
	return error_mark_node;
    }
  compound_literal = digest_init_flags (type, compound_literal, LOOKUP_NORMAL,
					complain);
  if (TREE_CODE (compound_literal) == CONSTRUCTOR)
    {
      TREE_HAS_CONSTRUCTOR (compound_literal) = true;
      if (fcl_context == fcl_c99)
	CONSTRUCTOR_C99_COMPOUND_LITERAL (compound_literal) = 1;
    }

  /* Put static/constant array temporaries in static variables.  */
  /* FIXME all C99 compound literals should be variables rather than
     C++ temporaries, unless they are used as an aggregate initializer.
     */
  if ((!at_function_scope_p () || CP_TYPE_CONST_P (type))
      && fcl_context == fcl_c99
      && TREE_CODE (type) == ARRAY_TYPE
      && !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
      && initializer_constant_valid_p (compound_literal, type))
    {
      tree decl = create_temporary_var (type);
      DECL_INITIAL (decl) = compound_literal;
      TREE_STATIC (decl) = 1;
      if (literal_type_p (type) && CP_TYPE_CONST_NON_VOLATILE_P (type))
	{
	  /* 5.19 says that a constant expression can include an
	     lvalue-rvalue conversion applied to "a glvalue of literal type
	     that refers to a non-volatile temporary object initialized
	     with a constant expression".  Rather than try to communicate
	     that this VAR_DECL is a temporary, just mark it constexpr.  */
	  DECL_DECLARED_CONSTEXPR_P (decl) = true;
	  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true;
	  TREE_CONSTANT (decl) = true;
	}
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
      decl = pushdecl_top_level (decl);
      /* The variable is compiler-generated; give it an anonymous name
	 so it cannot collide with user declarations.  */
      DECL_NAME (decl) = make_anon_name ();
      SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
      /* Make sure the destructor is callable.  */
      tree clean = cxx_maybe_build_cleanup (decl, complain);
      if (clean == error_mark_node)
	return error_mark_node;
      return decl;
    }

  /* Represent other compound literals with TARGET_EXPR so we produce
     an lvalue, but can elide copies.  */
  if (!VECTOR_TYPE_P (type))
    compound_literal = get_target_expr_sfinae (compound_literal, complain);

  return compound_literal;
}

/* Return the declaration for the function-name variable indicated by
   ID.  */

tree
finish_fname (tree id)
{
  tree decl;

  decl = fname_decl (input_location, C_RID_CODE (id), id);
  if (processing_template_decl && current_function_decl
      && decl != error_mark_node)
    /* In a template, defer to instantiation: represent the variable
       by its name only.  */
    decl = DECL_NAME (decl);
  return decl;
}

/* Finish a translation unit.  */

void
finish_translation_unit (void)
{
  /* In case there were missing closebraces,
     get us back to the global binding level.  */
  pop_everything ();
  while (current_namespace != global_namespace)
    pop_namespace ();

  /* Do file scope __FUNCTION__ et al.
   */
  finish_fname_decls ();
}

/* Finish a template type parameter, specified as AGGR IDENTIFIER.
   Returns the parameter.  */

tree
finish_template_type_parm (tree aggr, tree identifier)
{
  if (aggr != class_type_node)
    {
      /* Pedwarn-and-continue: treat e.g. `template <struct T>' as if
	 `class' had been written.  */
      permerror (input_location, "template type parameters must use the keyword %<class%> or %<typename%>");
      aggr = class_type_node;
    }

  return build_tree_list (aggr, identifier);
}

/* Finish a template template parameter, specified as AGGR IDENTIFIER.
   Returns the parameter.  */

tree
finish_template_template_parm (tree aggr, tree identifier)
{
  tree decl = build_decl (input_location,
			  TYPE_DECL, identifier, NULL_TREE);

  tree tmpl = build_lang_decl (TEMPLATE_DECL, identifier, NULL_TREE);
  DECL_TEMPLATE_PARMS (tmpl) = current_template_parms;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  DECL_ARTIFICIAL (decl) = 1;

  // Associate the constraints with the underlying declaration,
  // not the template.
  tree reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms);
  tree constr = build_constraints (reqs, NULL_TREE);
  set_constraints (decl, constr);

  end_template_decl ();

  gcc_assert (DECL_TEMPLATE_PARMS (tmpl));

  check_default_tmpl_args (decl, DECL_TEMPLATE_PARMS (tmpl),
			   /*is_primary=*/true, /*is_partial=*/false,
			   /*is_friend=*/0);

  return finish_template_type_parm (aggr, tmpl);
}

/* ARGUMENT is the default-argument value for a template template
   parameter.  If ARGUMENT is invalid, issue error messages and return
   the ERROR_MARK_NODE.  Otherwise, ARGUMENT itself is returned.  */

tree
check_template_template_default_arg (tree argument)
{
  if (TREE_CODE (argument) != TEMPLATE_DECL
      && TREE_CODE (argument) != TEMPLATE_TEMPLATE_PARM
      && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
    {
      if (TREE_CODE (argument) == TYPE_DECL)
	error ("invalid use of type %qT as a default value for a template "
	       "template-parameter", TREE_TYPE (argument));
      else
	error ("invalid default argument for a template template parameter");
      return error_mark_node;
    }

  return argument;
}

/* Begin a class definition, as indicated by T.
   */

tree
begin_class_definition (tree t)
{
  if (error_operand_p (t) || error_operand_p (TYPE_MAIN_DECL (t)))
    return error_mark_node;

  if (processing_template_parmlist)
    {
      error ("definition of %q#T inside template parameter list", t);
      return error_mark_node;
    }

  /* According to the C++ ABI, decimal classes defined in ISO/IEC TR 24733
     are passed the same as decimal scalar types.  */
  if (TREE_CODE (t) == RECORD_TYPE
      && !processing_template_decl)
    {
      tree ns = TYPE_CONTEXT (t);
      if (ns && TREE_CODE (ns) == NAMESPACE_DECL
	  && DECL_CONTEXT (ns) == std_node
	  && DECL_NAME (ns)
	  && id_equal (DECL_NAME (ns), "decimal"))
	{
	  const char *n = TYPE_NAME_STRING (t);
	  if ((strcmp (n, "decimal32") == 0)
	      || (strcmp (n, "decimal64") == 0)
	      || (strcmp (n, "decimal128") == 0))
	    TYPE_TRANSPARENT_AGGR (t) = 1;
	}
    }

  /* A non-implicit typename comes from code like:

       template <typename T> struct A {
         template <typename U> struct A<T>::B ...

     This is erroneous.  */
  else if (TREE_CODE (t) == TYPENAME_TYPE)
    {
      error ("invalid definition of qualified type %qT", t);
      t = error_mark_node;
    }

  /* Error recovery: if T is unusable as a class, substitute a fresh
     anonymous class so parsing of the body can continue.  */
  if (t == error_mark_node || ! MAYBE_CLASS_TYPE_P (t))
    {
      t = make_class_type (RECORD_TYPE);
      pushtag (make_anon_name (), t, /*tag_scope=*/ts_current);
    }

  if (TYPE_BEING_DEFINED (t))
    {
      t = make_class_type (TREE_CODE (t));
      pushtag (TYPE_IDENTIFIER (t), t, /*tag_scope=*/ts_current);
    }
  maybe_process_partial_specialization (t);
  pushclass (t);
  TYPE_BEING_DEFINED (t) = 1;
  class_binding_level->defining_class_p = 1;

  if (flag_pack_struct)
    {
      tree v;
      TYPE_PACKED (t) = 1;
      /* Even though the type is being defined for the first time
	 here, there might have been a forward declaration, so there
	 might be cv-qualified variants of T.  */
      for (v = TYPE_NEXT_VARIANT (t); v; v = TYPE_NEXT_VARIANT (v))
	TYPE_PACKED (v) = 1;
    }
  /* Reset the interface data, at the earliest possible
     moment, as it might have been set via a class foo;
     before.  */
  if (!
TYPE_UNNAMED_P (t))
    {
      struct c_fileinfo *finfo
	= get_fileinfo (LOCATION_FILE (input_location));
      CLASSTYPE_INTERFACE_ONLY (t) = finfo->interface_only;
      SET_CLASSTYPE_INTERFACE_UNKNOWN_X
	(t, finfo->interface_unknown);
    }
  reset_specialization();

  /* Make a declaration for this class in its own scope.  */
  build_self_reference ();

  return t;
}

/* Finish the member declaration given by DECL.  */

void
finish_member_declaration (tree decl)
{
  if (decl == error_mark_node || decl == NULL_TREE)
    return;

  if (decl == void_type_node)
    /* The COMPONENT was a friend, not a member, and so there's
       nothing for us to do.  */
    return;

  /* We should see only one DECL at a time.  */
  gcc_assert (DECL_CHAIN (decl) == NULL_TREE);

  /* Don't add decls after definition.  */
  gcc_assert (TYPE_BEING_DEFINED (current_class_type)
	      /* We can add lambda types when late parsing default
		 arguments.  */
	      || LAMBDA_TYPE_P (TREE_TYPE (decl)));

  /* Set up access control for DECL, from the access specifier
     currently in force in the class body.  */
  TREE_PRIVATE (decl)
    = (current_access_specifier == access_private_node);
  TREE_PROTECTED (decl)
    = (current_access_specifier == access_protected_node);
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    {
      TREE_PRIVATE (DECL_TEMPLATE_RESULT (decl)) = TREE_PRIVATE (decl);
      TREE_PROTECTED (DECL_TEMPLATE_RESULT (decl)) = TREE_PROTECTED (decl);
    }

  /* Mark the DECL as a member of the current class, unless it's
     a member of an enumeration.  */
  if (TREE_CODE (decl) != CONST_DECL)
    DECL_CONTEXT (decl) = current_class_type;

  if (TREE_CODE (decl) == USING_DECL)
    /* For now, ignore class-scope USING_DECLS, so that debugging
       backends do not see them. */
    DECL_IGNORED_P (decl) = 1;

  /* Check for bare parameter packs in the non-static data member
     declaration.
     */
  if (TREE_CODE (decl) == FIELD_DECL)
    {
      if (check_for_bare_parameter_packs (TREE_TYPE (decl)))
	TREE_TYPE (decl) = error_mark_node;
      if (check_for_bare_parameter_packs (DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl) = NULL_TREE;
    }

  /* [dcl.link]

     A C language linkage is ignored for the names of class members
     and the member function type of class member functions.  */
  if (DECL_LANG_SPECIFIC (decl))
    SET_DECL_LANGUAGE (decl, lang_cplusplus);

  bool add = false;

  /* Functions and non-functions are added differently.  */
  if (DECL_DECLARES_FUNCTION_P (decl))
    add = add_method (current_class_type, decl, false);
  /* Enter the DECL into the scope of the class, if the class
     isn't a closure (whose fields are supposed to be unnamed).  */
  else if (CLASSTYPE_LAMBDA_EXPR (current_class_type)
	   || pushdecl_class_level (decl))
    add = true;

  if (add)
    {
      /* All TYPE_DECLs go at the end of TYPE_FIELDS.  Ordinary fields
	 go at the beginning.  The reason is that
	 legacy_nonfn_member_lookup searches the list in order, and we
	 want a field name to override a type name so that the "struct
	 stat hack" will work.  In particular:

	   struct S { enum E { }; static const int E = 5; int ary[S::E]; } s;

	 is valid.  */
      if (TREE_CODE (decl) == TYPE_DECL)
	TYPE_FIELDS (current_class_type)
	  = chainon (TYPE_FIELDS (current_class_type), decl);
      else
	{
	  DECL_CHAIN (decl) = TYPE_FIELDS (current_class_type);
	  TYPE_FIELDS (current_class_type) = decl;
	}

      maybe_add_class_template_decl_list (current_class_type, decl,
					  /*friend_p=*/0);
    }
}

/* Finish processing a complete template declaration.  The PARMS are
   the template parameters.  */

void
finish_template_decl (tree parms)
{
  if (parms)
    end_template_decl ();
  else
    end_specialization ();
}

// Returns the template type of the class scope being entered. If we're
// entering a constrained class scope. TYPE is the class template
// scope being entered and we may need to match the intended type with
// a constrained specialization. For example:
//
//    template<Object T>
//      struct S { void f(); }; #1
//
//    template<Object T>
//      void S<T>::f() { }     #2
//
// We check, in #2, that S<T> refers precisely to the type declared by
// #1 (i.e., that the constraints match). Note that the following should
// be an error since there is no specialization of S<T> that is
// unconstrained, but this is not diagnosed here.
//
//    template<typename T>
//      void S<T>::f() { }
//
// We cannot diagnose this problem here since this function also matches
// qualified template names that are not part of a definition. For example:
//
//    template<Integral T, Floating_point U>
//      typename pair<T, U>::first_type void f(T, U);
//
// Here, it is unlikely that there is a partial specialization of
// pair constrained for for Integral and Floating_point arguments.
//
// The general rule is: if a constrained specialization with matching
// constraints is found return that type. Also note that if TYPE is not a
// class-type (e.g. a typename type), then no fixup is needed.

static tree
fixup_template_type (tree type)
{
  // Find the template parameter list at the a depth appropriate to
  // the scope we're trying to enter.  (processing_template_decl is
  // the current template nesting depth.)
  tree parms = current_template_parms;
  int depth = template_class_depth (type);
  for (int n = processing_template_decl; n > depth && parms; --n)
    parms = TREE_CHAIN (parms);
  if (!parms)
    return type;
  tree cur_reqs = TEMPLATE_PARMS_CONSTRAINTS (parms);
  tree cur_constr = build_constraints (cur_reqs, NULL_TREE);

  // Search for a specialization whose type and constraints match.
  tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
  tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl);
  while (specs)
    {
      tree spec_constr = get_constraints (TREE_VALUE (specs));

      // If the type and constraints match a specialization, then we
      // are entering that type.
if (same_type_p (type, TREE_TYPE (specs))
	  && equivalent_constraints (cur_constr, spec_constr))
	return TREE_TYPE (specs);
      specs = TREE_CHAIN (specs);
    }

  // If no specialization matches, then must return the type
  // previously found.
  return type;
}

/* Finish processing a template-id (which names a type) of the form
   NAME < ARGS >.  Return the TYPE_DECL for the type named by the
   template-id.  If ENTERING_SCOPE is nonzero we are about to enter
   the scope of template-id indicated.  */

tree
finish_template_type (tree name, tree args, int entering_scope)
{
  tree type;

  type = lookup_template_class (name, args,
				NULL_TREE, NULL_TREE, entering_scope,
				tf_warning_or_error | tf_user);

  /* If we might be entering the scope of a partial specialization,
     find the one with the right constraints.  (Only relevant when
     concepts are enabled, i.e. flag_concepts.)  */
  if (flag_concepts
      && entering_scope
      && CLASS_TYPE_P (type)
      && CLASSTYPE_TEMPLATE_INFO (type)
      && dependent_type_p (type)
      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type)))
    type = fixup_template_type (type);

  if (type == error_mark_node)
    return type;
  else if (CLASS_TYPE_P (type) && !alias_type_or_template_p (type))
    return TYPE_STUB_DECL (type);
  else
    return TYPE_NAME (type);
}

/* Finish processing a BASE_CLASS with the indicated ACCESS_SPECIFIER.
   Return a TREE_LIST containing the ACCESS_SPECIFIER and the
   BASE_CLASS, or NULL_TREE if an error occurred.  The
   ACCESS_SPECIFIER is one of
   access_{default,public,protected_private}_node.  For a virtual base
   we set TREE_TYPE.  */

tree
finish_base_specifier (tree base, tree access, bool virtual_p)
{
  tree result;

  if (base == error_mark_node)
    {
      error ("invalid base-class specification");
      result = NULL_TREE;
    }
  else if (! MAYBE_CLASS_TYPE_P (base))
    {
      error ("%qT is not a class type", base);
      result = NULL_TREE;
    }
  else
    {
      if (cp_type_quals (base) != 0)
	{
	  /* DR 484: Can a base-specifier name a cv-qualified
	     class type?
	     */
	  base = TYPE_MAIN_VARIANT (base);
	}
      result = build_tree_list (access, base);
      /* TREE_TYPE != NULL marks the base as virtual.  */
      if (virtual_p)
	TREE_TYPE (result) = integer_type_node;
    }

  return result;
}

/* If FNS is a member function, a set of member functions, or a
   template-id referring to one or more member functions, return a
   BASELINK for FNS, incorporating the current access context.
   Otherwise, return FNS unchanged.  */

tree
baselink_for_fns (tree fns)
{
  tree scope;
  tree cl;

  if (BASELINK_P (fns)
      || error_operand_p (fns))
    return fns;

  scope = ovl_scope (fns);
  if (!CLASS_TYPE_P (scope))
    return fns;

  cl = currently_open_derived_class (scope);
  if (!cl)
    cl = scope;
  cl = TYPE_BINFO (cl);
  return build_baselink (cl, cl, fns, /*optype=*/NULL_TREE);
}

/* Returns true iff DECL is a variable from a function outside
   the current one.  */

static bool
outer_var_p (tree decl)
{
  return ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL)
	  && DECL_FUNCTION_SCOPE_P (decl)
	  /* Don't get confused by temporaries.  */
	  && DECL_NAME (decl)
	  && (DECL_CONTEXT (decl) != current_function_decl
	      || parsing_nsdmi ()));
}

/* As above, but also checks that DECL is automatic.  */

bool
outer_automatic_var_p (tree decl)
{
  return (outer_var_p (decl)
	  && !TREE_STATIC (decl));
}

/* DECL satisfies outer_automatic_var_p.  Possibly complain about it or
   rewrite it for lambda capture.

   If ODR_USE is true, we're being called from mark_use, and we complain
   about use of constant variables.  If ODR_USE is false, we're being
   called for the id-expression, and we do lambda capture.  */

tree
process_outer_var_ref (tree decl, tsubst_flags_t complain, bool odr_use)
{
  if (cp_unevaluated_operand)
    /* It's not a use (3.2) if we're in an unevaluated context.  */
    return decl;
  if (decl == error_mark_node)
    return decl;

  tree context = DECL_CONTEXT (decl);
  tree containing_function = current_function_decl;
  tree lambda_stack = NULL_TREE;
  tree lambda_expr = NULL_TREE;
  tree initializer = convert_from_reference (decl);

  /* Mark it as used now even if the use is ill-formed.  */
  if (!mark_used (decl, complain))
    return error_mark_node;

  if (parsing_nsdmi ())
    containing_function = NULL_TREE;

  if (containing_function && LAMBDA_FUNCTION_P (containing_function))
    {
      /* Check whether we've already built a proxy.  */
      tree var = decl;
      while (is_normal_capture_proxy (var))
	var = DECL_CAPTURED_VARIABLE (var);
      tree d = retrieve_local_specialization (var);

      if (d && d != decl && is_capture_proxy (d))
	{
	  if (DECL_CONTEXT (d) == containing_function)
	    /* We already have an inner proxy.  */
	    return d;
	  else
	    /* We need to capture an outer proxy: recurse so each
	       enclosing lambda gets its own capture.  */
	    return process_outer_var_ref (d, complain, odr_use);
	}
    }

  /* If we are in a lambda function, we can move out until we hit
     1. the context,
     2. a non-lambda function, or
     3. a non-default capturing lambda function.  */
  while (context != containing_function
	 /* containing_function can be null with invalid generic lambdas.  */
	 && containing_function
	 && LAMBDA_FUNCTION_P (containing_function))
    {
      tree closure = DECL_CONTEXT (containing_function);
      lambda_expr = CLASSTYPE_LAMBDA_EXPR (closure);

      if (TYPE_CLASS_SCOPE_P (closure))
	/* A lambda in an NSDMI (c++/64496).  */
	break;

      if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE)
	break;

      lambda_stack = tree_cons (NULL_TREE,
				lambda_expr,
				lambda_stack);

      containing_function
	= decl_function_context (containing_function);
    }

  /* In a lambda within a template, wait until instantiation
     time to implicitly capture.  */
  if (context == containing_function
      && DECL_TEMPLATE_INFO (containing_function)
      && uses_template_parms (DECL_TI_ARGS (containing_function)))
    return decl;

  if (lambda_expr && VAR_P (decl)
      && DECL_ANON_UNION_VAR_P (decl))
    {
      if (complain & tf_error)
	error ("cannot capture member %qD of anonymous union", decl);
      return error_mark_node;
    }
  /* Do lambda capture when processing the id-expression, not when
     odr-using a variable.  */
  if (!odr_use && context == containing_function)
    {
      decl = add_default_capture (lambda_stack,
				  /*id=*/DECL_NAME (decl), initializer);
    }
  /* Only an odr-use of an outer automatic variable causes an
     error, and a constant variable can decay to a prvalue
     constant without odr-use.  So don't complain yet.  */
  else if (!odr_use && decl_constant_var_p (decl))
    return decl;
  else if (lambda_expr)
    {
      if (complain & tf_error)
	{
	  error ("%qD is not captured", decl);
	  tree closure = LAMBDA_EXPR_CLOSURE (lambda_expr);
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr)
	      == CPLD_NONE)
	    inform (location_of (closure),
		    "the lambda has no capture-default");
	  else if (TYPE_CLASS_SCOPE_P (closure))
	    inform (UNKNOWN_LOCATION, "lambda in local class %q+T cannot "
		    "capture variables from the enclosing context",
		    TYPE_CONTEXT (closure));
	  inform (DECL_SOURCE_LOCATION (decl), "%q#D declared here", decl);
	}
      return error_mark_node;
    }
  else
    {
      if (complain & tf_error)
	{
	  error (VAR_P (decl)
		 ? G_("use of local variable with automatic storage from "
		      "containing function")
		 : G_("use of parameter from containing function"));
	  inform (DECL_SOURCE_LOCATION (decl), "%q#D declared here", decl);
	}
      return error_mark_node;
    }
  return decl;
}

/* ID_EXPRESSION is a representation of parsed, but unprocessed,
   id-expression.  (See cp_parser_id_expression for details.)  SCOPE,
   if non-NULL, is the type or namespace used to explicitly qualify
   ID_EXPRESSION.  DECL is the entity to which that name has been
   resolved.

   *CONSTANT_EXPRESSION_P is true if we are presently parsing a
   constant-expression.  In that case, *NON_CONSTANT_EXPRESSION_P will
   be set to true if this expression isn't permitted in a
   constant-expression, but it is otherwise not set by this function.
   *ALLOW_NON_CONSTANT_EXPRESSION_P is true if we are parsing a
   constant-expression, but a non-constant expression is also
   permissible.

   DONE is true if this expression is a complete
   postfix-expression; it is false if this expression is followed by
   '->', '[', '(', etc.
   ADDRESS_P is true iff this expression is the operand of '&'.
   TEMPLATE_P is true iff the qualified-id was of the form
   "A::template B".  TEMPLATE_ARG_P is true iff this qualified name
   appears as a template argument.

   If an error occurs, and it is the kind of error that might cause
   the parser to abort a tentative parse, *ERROR_MSG is filled in.  It
   is the caller's responsibility to issue the message.  *ERROR_MSG
   will be a string with static storage duration, so the caller need
   not "free" it.

   Return an expression for the entity, after issuing appropriate
   diagnostics.  This function is also responsible for transforming a
   reference to a non-static member into a COMPONENT_REF that makes
   the use of "this" explicit.

   Upon return, *IDK will be filled in appropriately.  */

cp_expr
finish_id_expression (tree id_expression,
                      tree decl,
                      tree scope,
                      cp_id_kind *idk,
                      bool integral_constant_expression_p,
                      bool allow_non_integral_constant_expression_p,
                      bool *non_integral_constant_expression_p,
                      bool template_p,
                      bool done,
                      bool address_p,
                      bool template_arg_p,
                      const char **error_msg,
                      location_t location)
{
  decl = strip_using_decl (decl);

  /* Initialize the output parameters.  */
  *idk = CP_ID_KIND_NONE;
  *error_msg = NULL;

  if (id_expression == error_mark_node)
    return error_mark_node;
  /* If we have a template-id, then no further lookup is
     required.  If the template-id was for a template-class, we
     will sometimes have a TYPE_DECL at this point.  */
  else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
           || TREE_CODE (decl) == TYPE_DECL)
    ;
  /* Look up the name.  */
  else
    {
      if (decl == error_mark_node)
        {
          /* Name lookup failed.  */
          if (scope
              && (!TYPE_P (scope)
                  || (!dependent_type_p (scope)
                      && !(identifier_p (id_expression)
                           && IDENTIFIER_CONV_OP_P (id_expression)
                           && dependent_type_p (TREE_TYPE (id_expression))))))
            {
              /* If the qualifying type is non-dependent (and the name
                 does not name a conversion operator to a dependent
                 type), issue an error.  */
              qualified_name_lookup_error (scope, id_expression, decl,
                                           location);
              return error_mark_node;
            }
          else if (!scope)
            {
              /* It may be resolved via Koenig lookup.  */
              *idk = CP_ID_KIND_UNQUALIFIED;
              return id_expression;
            }
          else
            decl = id_expression;
        }
      /* If DECL is a variable that would be out of scope under
         ANSI/ISO rules, but in scope in the ARM, name lookup
         will succeed.  Issue a diagnostic here.  */
      else
        decl = check_for_out_of_scope_variable (decl);

      /* Remember that the name was used in the definition of
         the current class so that we can check later to see if
         the meaning would have been different after the class
         was entirely defined.  */
      if (!scope && decl != error_mark_node
          && identifier_p (id_expression))
        maybe_note_name_used_in_class (id_expression, decl);

      /* A use in unevaluated operand might not be instantiated
         appropriately if tsubst_copy builds a dummy parm, or if we
         never instantiate a generic lambda, so mark it now.  */
      if (processing_template_decl && cp_unevaluated_operand)
        mark_type_use (decl);

      /* Disallow uses of local variables from containing functions,
         except within lambda-expressions.  */
      if (outer_automatic_var_p (decl))
        {
          decl = process_outer_var_ref (decl, tf_warning_or_error);
          if (decl == error_mark_node)
            return error_mark_node;
        }

      /* Also disallow uses of function parameters outside the function
         body, except inside an unevaluated context (i.e. decltype).  */
      if (TREE_CODE (decl) == PARM_DECL
          && DECL_CONTEXT (decl) == NULL_TREE
          && !cp_unevaluated_operand)
        {
          *error_msg = G_("use of parameter outside function body");
          return error_mark_node;
        }
    }

  /* If we didn't find anything, or what we found was a type,
     then this wasn't really an id-expression.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL
      && !DECL_FUNCTION_TEMPLATE_P (decl))
    {
      *error_msg = G_("missing template arguments");
      return error_mark_node;
    }
  else if (TREE_CODE (decl) == TYPE_DECL
           || TREE_CODE (decl) == NAMESPACE_DECL)
    {
      *error_msg = G_("expected primary-expression");
      return error_mark_node;
    }

  /* If the name resolved to a template parameter, there is no
     need to look it up again later.  */
  if ((TREE_CODE (decl) == CONST_DECL && DECL_TEMPLATE_PARM_P (decl))
      || TREE_CODE (decl) == TEMPLATE_PARM_INDEX)
    {
      tree r;

      *idk = CP_ID_KIND_NONE;
      if (TREE_CODE (decl) == TEMPLATE_PARM_INDEX)
        decl = TEMPLATE_PARM_DECL (decl);
      r = convert_from_reference (DECL_INITIAL (decl));

      if (integral_constant_expression_p
          && !dependent_type_p (TREE_TYPE (decl))
          && !(INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (r))))
        {
          if (!allow_non_integral_constant_expression_p)
            error ("template parameter %qD of type %qT is not allowed in "
                   "an integral constant expression because it is not of "
                   "integral or enumeration type", decl, TREE_TYPE (decl));
          *non_integral_constant_expression_p = true;
        }
      return r;
    }
  else
    {
      bool dependent_p = type_dependent_expression_p (decl);

      /* If the declaration was explicitly qualified indicate
         that.  The semantics of `A::f(3)' are different than
         `f(3)' if `f' is virtual.  */
      *idk = (scope
              ? CP_ID_KIND_QUALIFIED
              : (TREE_CODE (decl) == TEMPLATE_ID_EXPR
                 ? CP_ID_KIND_TEMPLATE_ID
                 : (dependent_p
                    ? CP_ID_KIND_UNQUALIFIED_DEPENDENT
                    : CP_ID_KIND_UNQUALIFIED)));

      if (dependent_p
          && DECL_P (decl)
          && any_dependent_type_attributes_p (DECL_ATTRIBUTES (decl)))
        /* Dependent type attributes on the decl mean that the TREE_TYPE is
           wrong, so just return the identifier.  */
        return id_expression;

      if (TREE_CODE (decl) == NAMESPACE_DECL)
        {
          error ("use of namespace %qD as expression", decl);
          return error_mark_node;
        }
      else if (DECL_CLASS_TEMPLATE_P (decl))
        {
          error ("use of class template %qT as expression", decl);
          return error_mark_node;
        }
      else if (TREE_CODE (decl) == TREE_LIST)
        {
          /* Ambiguous reference to base members.  */
          error ("request for member %qD is ambiguous in "
                 "multiple inheritance lattice", id_expression);
          print_candidates (decl);
          return error_mark_node;
        }

      /* Mark variable-like entities as used.  Functions are similarly
         marked either below or after overload resolution.  */
      if ((VAR_P (decl)
           || TREE_CODE (decl) == PARM_DECL
           || TREE_CODE (decl) == CONST_DECL
           || TREE_CODE (decl) == RESULT_DECL)
          && !mark_used (decl))
        return error_mark_node;

      /* Only certain kinds of names are allowed in constant
         expression.  Template parameters have already
         been handled above.  */
      if (! error_operand_p (decl)
          && !dependent_p
          && integral_constant_expression_p
          && ! decl_constant_var_p (decl)
          && TREE_CODE (decl) != CONST_DECL
          && ! builtin_valid_in_constant_expr_p (decl))
        {
          if (!allow_non_integral_constant_expression_p)
            {
              error ("%qD cannot appear in a constant-expression", decl);
              return error_mark_node;
            }
          *non_integral_constant_expression_p = true;
        }

      tree wrap;
      if (VAR_P (decl)
          && !cp_unevaluated_operand
          && !processing_template_decl
          && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
          && CP_DECL_THREAD_LOCAL_P (decl)
          && (wrap = get_tls_wrapper_fn (decl)))
        {
          /* Replace an evaluated use of the thread_local variable with
             a call to its wrapper.  */
          decl = build_cxx_call (wrap, 0, NULL, tf_warning_or_error);
        }
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
               && !dependent_p
               && variable_template_p (TREE_OPERAND (decl, 0)))
        {
          decl = finish_template_variable (decl);
          mark_used (decl);
          decl = convert_from_reference (decl);
        }
      else if (scope)
        {
          if (TREE_CODE (decl) == SCOPE_REF)
            {
              gcc_assert (same_type_p (scope, TREE_OPERAND (decl, 0)));
              decl = TREE_OPERAND (decl, 1);
            }

          decl = (adjust_result_of_qualified_name_lookup
                  (decl, scope, current_nonlambda_class_type()));

          if (TREE_CODE (decl) == FUNCTION_DECL)
            mark_used (decl);

          if (TYPE_P (scope))
            decl = finish_qualified_id_expr (scope,
                                             decl,
                                             done,
                                             address_p,
                                             template_p,
                                             template_arg_p,
                                             tf_warning_or_error);
          else
            decl = convert_from_reference (decl);
        }
      else if (TREE_CODE (decl) == FIELD_DECL)
        {
          /* Since SCOPE is NULL here, this is an unqualified name.
             Access checking has been performed during name lookup
             already.  Turn off checking to avoid duplicate errors.  */
          push_deferring_access_checks (dk_no_check);
          decl = finish_non_static_data_member (decl, NULL_TREE,
                                                /*qualifying_scope=*/NULL_TREE);
          pop_deferring_access_checks ();
        }
      else if (is_overloaded_fn (decl))
        {
          tree first_fn = get_first_fn (decl);

          if (TREE_CODE (first_fn) == TEMPLATE_DECL)
            first_fn = DECL_TEMPLATE_RESULT (first_fn);

          /* [basic.def.odr]: "A function whose name appears as a
             potentially-evaluated expression is odr-used if it is the
             unique lookup result".

             But only mark it if it's a complete postfix-expression; in a
             call, ADL might select a different function, and we'll call
             mark_used in build_over_call.  */
          if (done
              && !really_overloaded_fn (decl)
              && !mark_used (first_fn))
            return error_mark_node;

          if (!template_arg_p
              && TREE_CODE (first_fn) == FUNCTION_DECL
              && DECL_FUNCTION_MEMBER_P (first_fn)
              && !shared_member_p (decl))
            {
              /* A set of member functions.  */
              decl = maybe_dummy_object (DECL_CONTEXT (first_fn), 0);
              return finish_class_member_access_expr (decl, id_expression,
                                                      /*template_p=*/false,
                                                      tf_warning_or_error);
            }

          decl = baselink_for_fns (decl);
        }
      else
        {
          if (DECL_P (decl) && DECL_NONLOCAL (decl)
              && DECL_CLASS_SCOPE_P (decl))
            {
              tree context = context_for_name_lookup (decl);
              if (context != current_class_type)
                {
                  tree path = currently_open_derived_class (context);
                  perform_or_defer_access_check (TYPE_BINFO (path),
                                                 decl, decl,
                                                 tf_warning_or_error);
                }
            }

          decl = convert_from_reference (decl);
        }
    }

  return cp_expr (decl, location);
}

/* Implement the __typeof keyword: Return the type of EXPR, suitable for
   use as a type-specifier.  */

tree
finish_typeof (tree expr)
{
  tree type;

  if (type_dependent_expression_p (expr))
    {
      /* Defer: build a TYPEOF_TYPE holding EXPR until instantiation.  */
      type = cxx_make_type (TYPEOF_TYPE);
      TYPEOF_TYPE_EXPR (type) = expr;
      SET_TYPE_STRUCTURAL_EQUALITY (type);

      return type;
    }

  expr = mark_type_use (expr);

  type = unlowered_expr_type (expr);

  if (!type || type == unknown_type_node)
    {
      error ("type of %qE is unknown", expr);
      return error_mark_node;
    }

  return type;
}

/* Implement the __underlying_type keyword: Return the underlying
   type of TYPE, suitable for use as a type-specifier.  */

tree
finish_underlying_type (tree type)
{
  tree underlying_type;

  if (processing_template_decl)
    {
      /* Defer: build an UNDERLYING_TYPE node until instantiation.  */
      underlying_type = cxx_make_type (UNDERLYING_TYPE);
      UNDERLYING_TYPE_TYPE (underlying_type) = type;
      SET_TYPE_STRUCTURAL_EQUALITY (underlying_type);

      return underlying_type;
    }

  if (!complete_type_or_else (type, NULL_TREE))
    return error_mark_node;

  if (TREE_CODE (type) != ENUMERAL_TYPE)
    {
      error ("%qT is not an enumeration type", type);
      return error_mark_node;
    }

  underlying_type = ENUM_UNDERLYING_TYPE (type);

  /* Fixup necessary in this case because ENUM_UNDERLYING_TYPE
     includes TYPE_MIN_VALUE and TYPE_MAX_VALUE information.
     See finish_enum_value_list for details.
     */
  if (!ENUM_FIXED_UNDERLYING_TYPE_P (type))
    underlying_type
      = c_common_type_for_mode (TYPE_MODE (underlying_type),
                                TYPE_UNSIGNED (underlying_type));

  return underlying_type;
}

/* Implement the __direct_bases keyword: Return the direct base classes
   of type.  */

tree
calculate_direct_bases (tree type, tsubst_flags_t complain)
{
  if (!complete_type_or_maybe_complain (type, NULL_TREE, complain)
      || !NON_UNION_CLASS_TYPE_P (type))
    return make_tree_vec (0);

  vec<tree, va_gc> *vector = make_tree_vector ();
  vec<tree, va_gc> *base_binfos = BINFO_BASE_BINFOS (TYPE_BINFO (type));
  tree binfo;
  unsigned i;

  /* Virtual bases are initialized first */
  for (i = 0; base_binfos->iterate (i, &binfo); i++)
    if (BINFO_VIRTUAL_P (binfo))
      vec_safe_push (vector, binfo);

  /* Now non-virtuals */
  for (i = 0; base_binfos->iterate (i, &binfo); i++)
    if (!BINFO_VIRTUAL_P (binfo))
      vec_safe_push (vector, binfo);

  /* Copy the collected BINFOs' types into a TREE_VEC result.  */
  tree bases_vec = make_tree_vec (vector->length ());

  for (i = 0; i < vector->length (); ++i)
    TREE_VEC_ELT (bases_vec, i) = BINFO_TYPE ((*vector)[i]);

  release_tree_vector (vector);
  return bases_vec;
}

/* Implement the __bases keyword: Return the base classes of type */

/* Find morally non-virtual base classes by walking binfo hierarchy */
/* Virtual base classes are handled separately in finish_bases */

static tree
dfs_calculate_bases_pre (tree binfo, void * /*data_*/)
{
  /* Don't walk bases of virtual bases */
  return BINFO_VIRTUAL_P (binfo) ? dfs_skip_bases : NULL_TREE;
}

/* Post-order callback: record each non-virtual base's type in the
   vec<tree> passed through DATA_.  */

static tree
dfs_calculate_bases_post (tree binfo, void *data_)
{
  vec<tree, va_gc> **data = ((vec<tree, va_gc> **) data_);
  if (!BINFO_VIRTUAL_P (binfo))
    vec_safe_push (*data, BINFO_TYPE (binfo));
  return NULL_TREE;
}

/* Calculates the morally non-virtual base classes of a class */

static vec<tree, va_gc> *
calculate_bases_helper (tree type)
{
  vec<tree, va_gc> *vector = make_tree_vector ();

  /* Now add non-virtual base classes in order of construction */
  if (TYPE_BINFO (type))
    dfs_walk_all (TYPE_BINFO (type),
                  dfs_calculate_bases_pre, dfs_calculate_bases_post, &vector);
  return vector;
}

/* Implement the __bases keyword for a non-template TYPE: virtual bases
   (and their morally non-virtual bases) first, then TYPE's own morally
   non-virtual bases, in order of construction.  */

tree
calculate_bases (tree type, tsubst_flags_t complain)
{
  if (!complete_type_or_maybe_complain (type, NULL_TREE, complain)
      || !NON_UNION_CLASS_TYPE_P (type))
    return make_tree_vec (0);

  vec<tree, va_gc> *vector = make_tree_vector ();
  tree bases_vec = NULL_TREE;
  unsigned i;
  vec<tree, va_gc> *vbases;
  vec<tree, va_gc> *nonvbases;
  tree binfo;

  /* First go through virtual base classes */
  for (vbases = CLASSTYPE_VBASECLASSES (type), i = 0;
       vec_safe_iterate (vbases, i, &binfo); i++)
    {
      vec<tree, va_gc> *vbase_bases
        = calculate_bases_helper (BINFO_TYPE (binfo));
      vec_safe_splice (vector, vbase_bases);
      release_tree_vector (vbase_bases);
    }

  /* Now for the non-virtual bases */
  nonvbases = calculate_bases_helper (type);
  vec_safe_splice (vector, nonvbases);
  release_tree_vector (nonvbases);

  /* Note that during error recovery vector->length can even be zero.  */
  if (vector->length () > 1)
    {
      /* Last element is entire class, so don't copy */
      bases_vec = make_tree_vec (vector->length () - 1);

      for (i = 0; i < vector->length () - 1; ++i)
        TREE_VEC_ELT (bases_vec, i) = (*vector)[i];
    }
  else
    bases_vec = make_tree_vec (0);

  release_tree_vector (vector);
  return bases_vec;
}

/* Build a BASES node for __bases/__direct_bases of TYPE; only valid
   while processing a template (expanded as a parameter pack later).  */

tree
finish_bases (tree type, bool direct)
{
  tree bases = NULL_TREE;

  if (!processing_template_decl)
    {
      /* Parameter packs can only be used in templates */
      error ("Parameter pack __bases only valid in template declaration");
      return error_mark_node;
    }

  bases = cxx_make_type (BASES);
  BASES_TYPE (bases) = type;
  BASES_DIRECT (bases) = direct;
  SET_TYPE_STRUCTURAL_EQUALITY (bases);

  return bases;
}

/* Perform C++-specific checks for __builtin_offsetof before calling
   fold_offsetof.  */

tree
finish_offsetof (tree object_ptr, tree expr, location_t loc)
{
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    {
      expr = build2 (OFFSETOF_EXPR, size_type_node, expr, object_ptr);
      SET_EXPR_LOCATION (expr, loc);
      return expr;
    }

  if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR)
    {
      error ("cannot apply %<offsetof%> to destructor %<~%T%>",
             TREE_OPERAND (expr, 2));
      return error_mark_node;
    }
  if (TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE
      || TREE_CODE (TREE_TYPE (expr)) == METHOD_TYPE
      || TREE_TYPE (expr) == unknown_type_node)
    {
      /* Strip down to the member itself for a precise diagnostic.  */
      while (TREE_CODE (expr) == COMPONENT_REF
             || TREE_CODE (expr) == COMPOUND_EXPR)
        expr = TREE_OPERAND (expr, 1);

      if (DECL_P (expr))
        {
          error ("cannot apply %<offsetof%> to member function %qD", expr);
          inform (DECL_SOURCE_LOCATION (expr), "declared here");
        }
      else
        error ("cannot apply %<offsetof%> to member function");
      return error_mark_node;
    }
  if (TREE_CODE (expr) == CONST_DECL)
    {
      error ("cannot apply %<offsetof%> to an enumerator %qD", expr);
      return error_mark_node;
    }
  if (REFERENCE_REF_P (expr))
    expr = TREE_OPERAND (expr, 0);
  if (!complete_type_or_else (TREE_TYPE (TREE_TYPE (object_ptr)),
                              object_ptr))
    return error_mark_node;
  if (warn_invalid_offsetof
      && CLASS_TYPE_P (TREE_TYPE (TREE_TYPE (object_ptr)))
      && CLASSTYPE_NON_STD_LAYOUT (TREE_TYPE (TREE_TYPE (object_ptr)))
      && cp_unevaluated_operand == 0)
    warning_at (loc, OPT_Winvalid_offsetof, "offsetof within "
                "non-standard-layout type %qT is conditionally-supported",
                TREE_TYPE (TREE_TYPE (object_ptr)));
  return fold_offsetof (expr);
}

/* Replace the AGGR_INIT_EXPR at *TP with an equivalent CALL_EXPR.  This
   function is broken out from the above for the benefit of the tree-ssa
   project.  */

void
simplify_aggr_init_expr (tree *tp)
{
  tree aggr_init_expr = *tp;

  /* Form an appropriate CALL_EXPR.  */
  tree fn = AGGR_INIT_EXPR_FN (aggr_init_expr);
  tree slot = AGGR_INIT_EXPR_SLOT (aggr_init_expr);
  tree type = TREE_TYPE (slot);

  tree call_expr;
  enum style_t { ctor, arg, pcc } style;

  if (AGGR_INIT_VIA_CTOR_P (aggr_init_expr))
    style = ctor;
#ifdef PCC_STATIC_STRUCT_RETURN
  else if (1)
    style = pcc;
#endif
  else
    {
      gcc_assert (TREE_ADDRESSABLE (type));
      style = arg;
    }

  call_expr = build_call_array_loc (input_location,
                                    TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))),
                                    fn,
                                    aggr_init_expr_nargs (aggr_init_expr),
                                    AGGR_INIT_EXPR_ARGP (aggr_init_expr));
  /* Copy the flags the AGGR_INIT_EXPR carried over to the CALL_EXPR.  */
  TREE_NOTHROW (call_expr) = TREE_NOTHROW (aggr_init_expr);
  CALL_FROM_THUNK_P (call_expr) = AGGR_INIT_FROM_THUNK_P (aggr_init_expr);
  CALL_EXPR_OPERATOR_SYNTAX (call_expr)
    = CALL_EXPR_OPERATOR_SYNTAX (aggr_init_expr);
  CALL_EXPR_ORDERED_ARGS (call_expr)
    = CALL_EXPR_ORDERED_ARGS (aggr_init_expr);
  CALL_EXPR_REVERSE_ARGS (call_expr)
    = CALL_EXPR_REVERSE_ARGS (aggr_init_expr);

  if (style == ctor)
    {
      /* Replace the first argument to the ctor with the address of the
         slot.  */
      cxx_mark_addressable (slot);
      CALL_EXPR_ARG (call_expr, 0)
        = build1 (ADDR_EXPR, build_pointer_type (type), slot);
    }
  else if (style == arg)
    {
      /* Just mark it addressable here, and leave the rest to
         expand_call{,_inline}.
         */
      cxx_mark_addressable (slot);
      CALL_EXPR_RETURN_SLOT_OPT (call_expr) = true;
      call_expr = build2 (INIT_EXPR, TREE_TYPE (call_expr),
                          slot, call_expr);
    }
  else if (style == pcc)
    {
      /* If we're using the non-reentrant PCC calling convention, then we
         need to copy the returned value out of the static buffer into the
         SLOT.  */
      push_deferring_access_checks (dk_no_check);
      call_expr = build_aggr_init (slot, call_expr,
                                   DIRECT_BIND | LOOKUP_ONLYCONVERTING,
                                   tf_warning_or_error);
      pop_deferring_access_checks ();
      call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (slot), call_expr, slot);
    }

  if (AGGR_INIT_ZERO_FIRST (aggr_init_expr))
    {
      /* Zero-initialize the slot before running the call.  */
      tree init = build_zero_init (type, NULL_TREE,
                                   /*static_storage_p=*/false);
      init = build2 (INIT_EXPR, void_type_node, slot, init);
      call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (call_expr),
                          init, call_expr);
    }

  *tp = call_expr;
}

/* Emit all thunks to FN that should be emitted when FN is emitted.  */

void
emit_associated_thunks (tree fn)
{
  /* When we use vcall offsets, we emit thunks with the virtual
     functions to which they thunk. The whole point of vcall offsets
     is so that you can know statically the entire set of thunks that
     will ever be needed for a given virtual function, thereby
     enabling you to output all the thunks with the function itself.  */
  if (DECL_VIRTUAL_P (fn)
      /* Do not emit thunks for extern template instantiations.  */
      && ! DECL_REALLY_EXTERN (fn))
    {
      tree thunk;

      for (thunk = DECL_THUNKS (fn); thunk; thunk = DECL_CHAIN (thunk))
        {
          if (!THUNK_ALIAS (thunk))
            {
              use_thunk (thunk, /*emit_p=*/1);
              if (DECL_RESULT_THUNK_P (thunk))
                {
                  /* Result-adjusting thunks can have their own thunks;
                     emit those too.  */
                  tree probe;

                  for (probe = DECL_THUNKS (thunk);
                       probe; probe = DECL_CHAIN (probe))
                    use_thunk (probe, /*emit_p=*/1);
                }
            }
          else
            gcc_assert (!DECL_THUNKS (thunk));
        }
    }
}

/* Generate RTL for FN.  */

bool
expand_or_defer_fn_1 (tree fn)
{
  /* When the parser calls us after finishing the body of a template
     function, we don't really want to expand the body.  */
  if (processing_template_decl)
    {
      /* Normally, collection only occurs in rest_of_compilation.  So,
         if we don't collect here, we never collect junk generated
         during the processing of templates until we hit a
         non-template function.  It's not safe to do this inside a
         nested class, though, as the parser may have local state that
         is not a GC root.  */
      if (!function_depth)
        ggc_collect ();
      return false;
    }

  gcc_assert (DECL_SAVED_TREE (fn));

  /* We make a decision about linkage for these functions at the end
     of the compilation.  Until that point, we do not want the back
     end to output them -- but we do want it to see the bodies of
     these functions so that it can inline them as appropriate.  */
  if (DECL_DECLARED_INLINE_P (fn) || DECL_IMPLICIT_INSTANTIATION (fn))
    {
      if (DECL_INTERFACE_KNOWN (fn))
        /* We've already made a decision as to how this function will
           be handled.  */;
      else if (!at_eof)
        tentative_decl_linkage (fn);
      else
        import_export_decl (fn);

      /* If the user wants us to keep all inline functions, then mark
         this function as needed so that finish_file will make sure to
         output it later.  Similarly, all dllexport'd functions must
         be emitted; there may be callers in other DLLs.  */
      if (DECL_DECLARED_INLINE_P (fn)
          && !DECL_REALLY_EXTERN (fn)
          && (flag_keep_inline_functions
              || (flag_keep_inline_dllexport
                  && lookup_attribute ("dllexport",
                                       DECL_ATTRIBUTES (fn)))))
        {
          mark_needed (fn);
          DECL_EXTERNAL (fn) = 0;
        }
    }

  /* If this is a constructor or destructor body, we have to clone
     it.  */
  if (maybe_clone_body (fn))
    {
      /* We don't want to process FN again, so pretend we've written
         it out, even though we haven't.  */
      TREE_ASM_WRITTEN (fn) = 1;
      /* If this is a constexpr function, keep DECL_SAVED_TREE.  */
      if (!DECL_DECLARED_CONSTEXPR_P (fn))
        DECL_SAVED_TREE (fn) = NULL_TREE;
      return false;
    }

  /* There's no reason to do any of the work here if we're only doing
     semantic analysis; this code just generates RTL.  */
  if (flag_syntax_only)
    return false;

  return true;
}

/* Hand FN to the callgraph manager for expansion (now or deferred),
   when expand_or_defer_fn_1 says it is ready, and emit its thunks.  */

void
expand_or_defer_fn (tree fn)
{
  if (expand_or_defer_fn_1 (fn))
    {
      function_depth++;

      /* Expand or defer, at the whim of the compilation unit manager.  */
      cgraph_node::finalize_function (fn, function_depth > 1);
      emit_associated_thunks (fn);

      function_depth--;
    }
}

/* State shared by finalize_nrv and its walk_tree callback.  */

struct nrv_data
{
  nrv_data () : visited (37) {}

  /* The NRV variable being replaced.  */
  tree var;
  /* The function's RESULT_DECL that replaces it.  */
  tree result;
  /* Trees already visited, to avoid rewalking shared subtrees.  */
  hash_table<nofree_ptr_hash <tree_node> > visited;
};

/* Helper function for walk_tree, used by finalize_nrv below.  */

static tree
finalize_nrv_r (tree* tp, int* walk_subtrees, void* data)
{
  struct nrv_data *dp = (struct nrv_data *)data;
  tree_node **slot;

  /* No need to walk into types.  There wouldn't be any need to walk into
     non-statements, except that we have to consider STMT_EXPRs.  */
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  /* Change all returns to just refer to the RESULT_DECL; this is a nop,
     but differs from using NULL_TREE in that it indicates that we care
     about the value of the RESULT_DECL.  */
  else if (TREE_CODE (*tp) == RETURN_EXPR)
    TREE_OPERAND (*tp, 0) = dp->result;
  /* Change all cleanups for the NRV to only run when an exception is
     thrown.  */
  else if (TREE_CODE (*tp) == CLEANUP_STMT
           && CLEANUP_DECL (*tp) == dp->var)
    CLEANUP_EH_ONLY (*tp) = 1;
  /* Replace the DECL_EXPR for the NRV with an initialization of the
     RESULT_DECL, if needed.  */
  else if (TREE_CODE (*tp) == DECL_EXPR
           && DECL_EXPR_DECL (*tp) == dp->var)
    {
      tree init;
      if (DECL_INITIAL (dp->var)
          && DECL_INITIAL (dp->var) != error_mark_node)
        init = build2 (INIT_EXPR, void_type_node, dp->result,
                       DECL_INITIAL (dp->var));
      else
        init = build_empty_stmt (EXPR_LOCATION (*tp));
      DECL_INITIAL (dp->var) = NULL_TREE;
      SET_EXPR_LOCATION (init, EXPR_LOCATION (*tp));
      *tp = init;
    }
  /* And replace all uses of the NRV with the RESULT_DECL.  */
  else if (*tp == dp->var)
    *tp = dp->result;

  /* Avoid walking into the same tree more than once.
     Unfortunately, we can't just use walk_tree_without duplicates
     because it would only call us for the first occurrence of dp->var
     in the function body.  */
  slot = dp->visited.find_slot (*tp, INSERT);
  if (*slot)
    *walk_subtrees = 0;
  else
    *slot = *tp;

  /* Keep iterating.  */
  return NULL_TREE;
}

/* Called from finish_function to implement the named return value
   optimization by overriding all the RETURN_EXPRs and pertinent
   CLEANUP_STMTs and replacing all occurrences of VAR with RESULT, the
   RESULT_DECL for the function.  */

void
finalize_nrv (tree *tp, tree var, tree result)
{
  struct nrv_data data;

  /* Copy name from VAR to RESULT.  */
  DECL_NAME (result) = DECL_NAME (var);
  /* Don't forget that we take its address.  */
  TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (var);
  /* Finally set DECL_VALUE_EXPR to avoid assigning
     a stack slot at -O0 for the original var and debug info
     uses RESULT location for VAR.  */
  SET_DECL_VALUE_EXPR (var, result);
  DECL_HAS_VALUE_EXPR_P (var) = 1;

  data.var = var;
  data.result = result;
  cp_walk_tree (tp, finalize_nrv_r, &data, 0);
}

/* Create CP_OMP_CLAUSE_INFO for clause C.  Returns true if it is
   invalid.  */

bool
cxx_omp_create_clause_info (tree c, tree type, bool need_default_ctor,
                            bool need_copy_ctor, bool need_copy_assignment,
                            bool need_dtor)
{
  int save_errorcount = errorcount;
  tree info, t;

  /* Always allocate 3 elements for simplicity.  These are the
     function decls for the ctor, dtor, and assignment op.
     This layout is known to the three lang hooks,
     cxx_omp_clause_default_init, cxx_omp_clause_copy_init,
     and cxx_omp_clause_assign_op.  */
  info = make_tree_vec (3);
  CP_OMP_CLAUSE_INFO (c) = info;

  if (need_default_ctor || need_copy_ctor)
    {
      if (need_default_ctor)
        t = get_default_ctor (type);
      else
        t = get_copy_ctor (type, tf_warning_or_error);

      /* Trivial special members need no call; leave the slot empty.  */
      if (t && !trivial_fn_p (t))
        TREE_VEC_ELT (info, 0) = t;
    }

  if (need_dtor && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
    TREE_VEC_ELT (info, 1) = get_dtor (type, tf_warning_or_error);

  if (need_copy_assignment)
    {
      t = get_copy_assign (type);

      if (t && !trivial_fn_p (t))
        TREE_VEC_ELT (info, 2) = t;
    }

  /* Any diagnostics issued while looking up the special members make
     the clause invalid.  */
  return errorcount != save_errorcount;
}

/* If DECL is DECL_OMP_PRIVATIZED_MEMBER, return corresponding
   FIELD_DECL, otherwise return DECL itself.  */

static tree
omp_clause_decl_field (tree decl)
{
  if (VAR_P (decl)
      && DECL_HAS_VALUE_EXPR_P (decl)
      && DECL_ARTIFICIAL (decl)
      && DECL_LANG_SPECIFIC (decl)
      && DECL_OMP_PRIVATIZED_MEMBER (decl))
    {
      tree f = DECL_VALUE_EXPR (decl);
      if (INDIRECT_REF_P (f))
        f = TREE_OPERAND (f, 0);
      if (TREE_CODE (f) == COMPONENT_REF)
        {
          f = TREE_OPERAND (f, 1);
          gcc_assert (TREE_CODE (f) == FIELD_DECL);
          return f;
        }
    }
  return NULL_TREE;
}

/* Adjust DECL if needed for printing using %qE.  */

static tree
omp_clause_printable_decl (tree decl)
{
  tree t = omp_clause_decl_field (decl);
  if (t)
    return t;
  return decl;
}

/* For a FIELD_DECL F and corresponding DECL_OMP_PRIVATIZED_MEMBER
   VAR_DECL T that doesn't need a DECL_EXPR added, record it for
   privatization.  */

static void
omp_note_field_privatization (tree f, tree t)
{
  if (!omp_private_member_map)
    omp_private_member_map = new hash_map<tree, tree>;
  tree &v = omp_private_member_map->get_or_insert (f);
  if (v == NULL_TREE)
    {
      v = t;
      omp_private_member_vec.safe_push (f);
      /* Signal that we don't want to create DECL_EXPR for this dummy
         var.  */
      omp_private_member_vec.safe_push (integer_zero_node);
    }
}

/* Privatize FIELD_DECL T, return corresponding
   DECL_OMP_PRIVATIZED_MEMBER dummy VAR_DECL.  */

tree
omp_privatize_field (tree t, bool shared)
{
  tree m = finish_non_static_data_member (t, NULL_TREE, NULL_TREE);
  if (m == error_mark_node)
    return error_mark_node;
  if (!omp_private_member_map && !shared)
    omp_private_member_map = new hash_map<tree, tree>;
  if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE)
    {
      /* For reference members, strip the implicit dereference.  */
      gcc_assert (INDIRECT_REF_P (m));
      m = TREE_OPERAND (m, 0);
    }
  tree vb = NULL_TREE;
  /* For SHARED use a throwaway local rather than the cached map entry.  */
  tree &v = shared ? vb : omp_private_member_map->get_or_insert (t);
  if (v == NULL_TREE)
    {
      v = create_temporary_var (TREE_TYPE (m));
      retrofit_lang_decl (v);
      DECL_OMP_PRIVATIZED_MEMBER (v) = 1;
      SET_DECL_VALUE_EXPR (v, m);
      DECL_HAS_VALUE_EXPR_P (v) = 1;
      if (!shared)
        omp_private_member_vec.safe_push (t);
    }
  return v;
}

/* Helper function for handle_omp_array_sections.  Called recursively
   to handle multiple array-section-subscripts.  C is the clause,
   T current expression (initially OMP_CLAUSE_DECL), which is either
   a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound
   expression if specified, TREE_VALUE length expression if specified,
   TREE_CHAIN is what it has been specified after, or some decl.
   TYPES vector is populated with array section types, MAYBE_ZERO_LEN
   set to true if any of the array-section-subscript could have length
   of zero (explicit or implicit), FIRST_NON_ONE is the index of the
   first array-section-subscript which is known not to have length
   of one.  Given say:
   map(a[:b][2:1][:c][:2][:d][e:f][2:5])
   FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c]
   all are or may have length of 1, array-section-subscript [:2] is the
   first one known not to have length 1.  For array-section-subscript
   <= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't
   0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we
   can if MAYBE_ZERO_LEN is false.  MAYBE_ZERO_LEN will be true in the above
   case though, as some lengths could be zero.
*/

static tree
handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
			     bool &maybe_zero_len, unsigned int &first_non_one,
			     enum c_omp_region_type ort)
{
  tree ret, low_bound, length, type;
  if (TREE_CODE (t) != TREE_LIST)
    {
      /* Base case: T is the base expression the sections are applied
	 to (a decl, or a COMPONENT_REF for map/to/from clauses).  */
      if (error_operand_p (t))
	return error_mark_node;
      if (REFERENCE_REF_P (t)
	  && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF)
	t = TREE_OPERAND (t, 0);
      ret = t;
      if (TREE_CODE (t) == COMPONENT_REF
	  && ort == C_ORT_OMP
	  && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM)
	  && !type_dependent_expression_p (t))
	{
	  /* Bit-fields and union members cannot appear in these
	     clauses; walk the whole COMPONENT_REF chain to check.  */
	  if (TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL
	      && DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"bit-field %qE in %qs clause",
			t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	      return error_mark_node;
	    }
	  while (TREE_CODE (t) == COMPONENT_REF)
	    {
	      if (TREE_TYPE (TREE_OPERAND (t, 0))
		  && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%qE is a member of a union", t);
		  return error_mark_node;
		}
	      t = TREE_OPERAND (t, 0);
	    }
	  if (REFERENCE_REF_P (t))
	    t = TREE_OPERAND (t, 0);
	}
      if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
	{
	  /* In templates an OVERLOAD may still resolve to a variable
	     later; everything else is diagnosed here.  */
	  if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
	    return NULL_TREE;
	  if (DECL_P (t))
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qD is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  else
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qE is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      else if (TREE_CODE (t) == PARM_DECL
	       && DECL_ARTIFICIAL (t)
	       && DECL_NAME (t) == this_identifier)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%<this%> allowed in OpenMP only in %<declare simd%>"
		    " clauses");
	  return error_mark_node;
	}
      else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	       && VAR_P (t)
	       && CP_DECL_THREAD_LOCAL_P (t))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%qD is threadprivate variable in %qs clause", t,
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (type_dependent_expression_p (ret))
	return NULL_TREE;
      ret = convert_from_reference (ret);
      return ret;
    }

  /* Recursive case: T is a TREE_LIST for one array-section-subscript;
     process the inner (earlier) subscripts first so RET/TYPE describe
     the expression this subscript applies to.  */
  if (ort == C_ORT_OMP
      && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
      && TREE_CODE (TREE_CHAIN (t)) == FIELD_DECL)
    TREE_CHAIN (t) = omp_privatize_field (TREE_CHAIN (t), false);
  ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types,
				     maybe_zero_len, first_non_one, ort);
  if (ret == error_mark_node || ret == NULL_TREE)
    return ret;

  type = TREE_TYPE (ret);
  low_bound = TREE_PURPOSE (t);
  length = TREE_VALUE (t);
  if ((low_bound && type_dependent_expression_p (low_bound))
      || (length && type_dependent_expression_p (length)))
    return NULL_TREE;

  if (low_bound == error_mark_node || length == error_mark_node)
    return error_mark_node;

  if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"low bound %qE of array section does not have integral type",
		low_bound);
      return error_mark_node;
    }
  if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"length %qE of array section does not have integral type",
		length);
      return error_mark_node;
    }
  if (low_bound)
    low_bound = mark_rvalue_use (low_bound);
  if (length)
    length = mark_rvalue_use (length);
  /* We need to reduce to real constant-values for checks below.  */
  if (length)
    length = fold_simple (length);
  if (low_bound)
    low_bound = fold_simple (low_bound);
  /* Over-wide INTEGER_CSTs are normalized to sizetype so the
     tree_int_cst_* comparisons below are meaningful.  */
  if (low_bound
      && TREE_CODE (low_bound) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (low_bound))
	 > TYPE_PRECISION (sizetype))
    low_bound = fold_convert (sizetype, low_bound);
  if (length
      && TREE_CODE (length) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (length))
	 > TYPE_PRECISION (sizetype))
    length = fold_convert (sizetype, length);
  /* An omitted low bound means 0.  */
  if (low_bound == NULL_TREE)
    low_bound = integer_zero_node;

  if (length != NULL_TREE)
    {
      if (!integer_nonzerop (length))
	{
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
	    {
	      /* depend/reduction reject zero-length sections outright;
	         other clauses only note the possibility.  */
	      if (integer_zerop (length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "zero length array section in %qs clause",
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	    }
	  else
	    maybe_zero_len = true;
	}
      /* Track the first subscript not known to have length 1 (see the
	 function comment for the FIRST_NON_ONE protocol).  */
      if (first_non_one == types.length ()
	  && (TREE_CODE (length) != INTEGER_CST || integer_onep (length)))
	first_non_one++;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (length == NULL_TREE
	  && (TYPE_DOMAIN (type) == NULL_TREE
	      || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for unknown bound array type length expression must "
		    "be specified");
	  return error_mark_node;
	}
      if (TREE_CODE (low_bound) == INTEGER_CST
	  && tree_int_cst_sgn (low_bound) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative low bound in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (TYPE_DOMAIN (type)
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
	     == INTEGER_CST)
	{
	  /* SIZE is the number of elements: domain max + 1.  */
	  tree size
	    = fold_convert (sizetype, TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
	  size = size_binop (PLUS_EXPR, size, size_one_node);
	  if (TREE_CODE (low_bound) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, low_bound))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "low bound %qE above array section size "
			    "in %qs clause", low_bound,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (tree_int_cst_equal (size, low_bound))
		{
		  /* Low bound == size implies a zero-length section.  */
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"zero length array section in %qs clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		  maybe_zero_len = true;
		}
	      else if (length == NULL_TREE
		       && first_non_one == types.length ()
		       && tree_int_cst_equal
			    (TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
			     low_bound))
		first_non_one++;
	    }
	  else if (length == NULL_TREE)
	    {
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
		  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
		maybe_zero_len = true;
	      if (first_non_one == types.length ())
		first_non_one++;
	    }
	  if (length && TREE_CODE (length) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "length %qE above array section size "
			    "in %qs clause", length,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (TREE_CODE (low_bound) == INTEGER_CST)
		{
		  tree lbpluslen
		    = size_binop (PLUS_EXPR,
				  fold_convert (sizetype, low_bound),
				  fold_convert (sizetype, length));
		  if (TREE_CODE (lbpluslen) == INTEGER_CST
		      && tree_int_cst_lt (size, lbpluslen))
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"high bound %qE above array section size "
				"in %qs clause", lbpluslen,
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		}
	    }
	}
      else if (length == NULL_TREE)
	{
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	      && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	    maybe_zero_len = true;
	  if (first_non_one == types.length ())
	    first_non_one++;
	}

      /* For [lb:] we will need to evaluate lb more than once.  */
      if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	{
	  tree lb = cp_save_expr (low_bound);
	  if (lb != low_bound)
	    {
	      TREE_PURPOSE (t) = lb;
	      low_bound = lb;
	    }
	}
    }
  else if (TREE_CODE (type) == POINTER_TYPE)
    {
      if (length == NULL_TREE)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for pointer type length expression must be specified");
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      /* If there is a pointer type anywhere but in the very first
	 array-section-subscript, the array section can't be contiguous.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	  && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "array section is not contiguous in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
    }
  else
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"%qE does not have pointer or array type", ret);
      return error_mark_node;
    }
  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
    types.safe_push (TREE_TYPE (ret));
  /* We will need to evaluate lb more than once.  */
  tree lb = cp_save_expr (low_bound);
  if (lb != low_bound)
    {
      TREE_PURPOSE (t) = lb;
      low_bound = lb;
    }
  ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, false);
  return ret;
}

/* Handle array sections for clause C.
*/

/* Validate and lower the array-section OMP_CLAUSE_DECL of clause C.
   Returns true if the clause is invalid and should be removed; false
   otherwise.  For depend clauses only side effects are preserved; for
   map/to/from/reduction the decl is rewritten to the section base with
   OMP_CLAUSE_SIZE set, and for map an additional pointer clause (C2,
   possibly C3) is chained after C.  */

static bool
handle_omp_array_sections (tree c, enum c_omp_region_type ort)
{
  bool maybe_zero_len = false;
  unsigned int first_non_one = 0;
  auto_vec<tree, 10> types;
  tree first = handle_omp_array_sections_1 (c, OMP_CLAUSE_DECL (c), types,
					    maybe_zero_len, first_non_one,
					    ort);
  if (first == error_mark_node)
    return true;
  if (first == NULL_TREE)
    return false;
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
    {
      tree t = OMP_CLAUSE_DECL (c);
      tree tem = NULL_TREE;
      if (processing_template_decl)
	return false;
      /* Need to evaluate side effects in the length expressions
	 if any.  */
      while (TREE_CODE (t) == TREE_LIST)
	{
	  if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t)))
	    {
	      if (tem == NULL_TREE)
		tem = TREE_VALUE (t);
	      else
		tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem),
			      TREE_VALUE (t), tem);
	    }
	  t = TREE_CHAIN (t);
	}
      if (tem)
	first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first);
      OMP_CLAUSE_DECL (c) = first;
    }
  else
    {
      unsigned int num = types.length (), i;
      tree t, side_effects = NULL_TREE, size = NULL_TREE;
      tree condition = NULL_TREE;

      if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
	maybe_zero_len = true;
      if (processing_template_decl && maybe_zero_len)
	return false;

      /* Walk the subscript TREE_LIST outermost-first; I counts down so
	 TYPES[I] matches the type each subscript applies to.  Builds
	 SIZE (total bytes), CONDITION (all trailing lengths non-zero)
	 and collects length side effects.  */
      for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
	   t = TREE_CHAIN (t))
	{
	  tree low_bound = TREE_PURPOSE (t);
	  tree length = TREE_VALUE (t);

	  i--;
	  if (low_bound
	      && TREE_CODE (low_bound) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (low_bound))
		 > TYPE_PRECISION (sizetype))
	    low_bound = fold_convert (sizetype, low_bound);
	  if (length
	      && TREE_CODE (length) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (length))
		 > TYPE_PRECISION (sizetype))
	    length = fold_convert (sizetype, length);
	  if (low_bound == NULL_TREE)
	    low_bound = integer_zero_node;
	  if (!maybe_zero_len && i > first_non_one)
	    {
	      /* Inner subscripts must cover the whole array dimension,
		 otherwise the section is not contiguous.  */
	      if (integer_nonzerop (low_bound))
		goto do_warn_noncontiguous;
	      if (length != NULL_TREE
		  && TREE_CODE (length) == INTEGER_CST
		  && TYPE_DOMAIN (types[i])
		  && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))
		  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])))
		     == INTEGER_CST)
		{
		  tree size;
		  size = size_binop (PLUS_EXPR,
				     TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				     size_one_node);
		  if (!tree_int_cst_equal (length, size))
		    {
		     do_warn_noncontiguous:
		      error_at (OMP_CLAUSE_LOCATION (c),
				"array section is not contiguous in %qs "
				"clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return true;
		    }
		}
	      if (!processing_template_decl
		  && length != NULL_TREE
		  && TREE_SIDE_EFFECTS (length))
		{
		  if (side_effects == NULL_TREE)
		    side_effects = length;
		  else
		    side_effects = build2 (COMPOUND_EXPR,
					   TREE_TYPE (side_effects),
					   length, side_effects);
		}
	    }
	  else if (processing_template_decl)
	    continue;
	  else
	    {
	      tree l;

	      if (i > first_non_one
		  && ((length && integer_nonzerop (length))
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION))
		continue;
	      if (length)
		l = fold_convert (sizetype, length);
	      else
		{
		  /* Implicit length: whole dimension minus low bound.  */
		  l = size_binop (PLUS_EXPR,
				  TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				  size_one_node);
		  l = size_binop (MINUS_EXPR, l,
				  fold_convert (sizetype, low_bound));
		}
	      if (i > first_non_one)
		{
		  l = fold_build2 (NE_EXPR, boolean_type_node, l,
				   size_zero_node);
		  if (condition == NULL_TREE)
		    condition = l;
		  else
		    condition = fold_build2 (BIT_AND_EXPR, boolean_type_node,
					     l, condition);
		}
	      else if (size == NULL_TREE)
		{
		  size = size_in_bytes (TREE_TYPE (types[i]));
		  tree eltype = TREE_TYPE (types[num - 1]);
		  while (TREE_CODE (eltype) == ARRAY_TYPE)
		    eltype = TREE_TYPE (eltype);
		  /* Reduction sizes are in elements, not bytes.  */
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
		    size = size_binop (EXACT_DIV_EXPR, size,
				       size_in_bytes (eltype));
		  size = size_binop (MULT_EXPR, size, l);
		  if (condition)
		    size = fold_build3 (COND_EXPR, sizetype, condition,
					size, size_zero_node);
		}
	      else
		size = size_binop (MULT_EXPR, size, l);
	    }
	}
      if (!processing_template_decl)
	{
	  if (side_effects)
	    size = build2 (COMPOUND_EXPR, sizetype, side_effects, size);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
	    {
	      /* Reduction over a section: rewrite the decl as a MEM_REF
		 of array type, offset from the base pointer.  */
	      size = size_binop (MINUS_EXPR, size, size_one_node);
	      tree index_type = build_index_type (size);
	      tree eltype = TREE_TYPE (first);
	      while (TREE_CODE (eltype) == ARRAY_TYPE)
		eltype = TREE_TYPE (eltype);
	      tree type = build_array_type (eltype, index_type);
	      tree ptype = build_pointer_type (eltype);
	      if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
		  && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (t))))
		t = convert_from_reference (t);
	      else if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
		t = build_fold_addr_expr (t);
	      tree t2 = build_fold_addr_expr (first);
	      t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				     ptrdiff_type_node, t2);
	      t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
				    ptrdiff_type_node, t2,
				    fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						      ptrdiff_type_node, t));
	      if (tree_fits_shwi_p (t2))
		t = build2 (MEM_REF, type, t,
			    build_int_cst (ptype, tree_to_shwi (t2)));
	      else
		{
		  t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
					 sizetype, t2);
		  t = build2_loc (OMP_CLAUSE_LOCATION (c), POINTER_PLUS_EXPR,
				  TREE_TYPE (t), t, t2);
		  t = build2 (MEM_REF, type, t, build_int_cst (ptype, 0));
		}
	      OMP_CLAUSE_DECL (c) = t;
	      return false;
	    }
	  OMP_CLAUSE_DECL (c) = first;
	  OMP_CLAUSE_SIZE (c) = size;
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
	      || (TREE_CODE (t) == COMPONENT_REF
		  && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE))
	    return false;
	  if (ort == C_ORT_OMP || ort == C_ORT_ACC)
	    switch (OMP_CLAUSE_MAP_KIND (c))
	      {
	      case GOMP_MAP_ALLOC:
	      case GOMP_MAP_TO:
	      case GOMP_MAP_FROM:
	      case GOMP_MAP_TOFROM:
	      case GOMP_MAP_ALWAYS_TO:
	      case GOMP_MAP_ALWAYS_FROM:
	      case GOMP_MAP_ALWAYS_TOFROM:
	      case GOMP_MAP_RELEASE:
	      case GOMP_MAP_DELETE:
	      case GOMP_MAP_FORCE_TO:
	      case GOMP_MAP_FORCE_FROM:
	      case GOMP_MAP_FORCE_TOFROM:
	      case GOMP_MAP_FORCE_PRESENT:
		OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c) = 1;
		break;
	      default:
		break;
	      }
	  /* Chain an extra map clause C2 describing the base pointer
	     and its offset to the section start.  */
	  tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
				      OMP_CLAUSE_MAP);
	  if ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP && ort != C_ORT_ACC)
	    OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER);
	  else if (TREE_CODE (t) == COMPONENT_REF)
	    OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALWAYS_POINTER);
	  else if (REFERENCE_REF_P (t)
		   && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF)
	    {
	      t = TREE_OPERAND (t, 0);
	      OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALWAYS_POINTER);
	    }
	  else
	    OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_POINTER);
	  if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER
	      && !cxx_mark_addressable (t))
	    return false;
	  OMP_CLAUSE_DECL (c2) = t;
	  t = build_fold_addr_expr (first);
	  t = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				ptrdiff_type_node, t);
	  tree ptr = OMP_CLAUSE_DECL (c2);
	  ptr = convert_from_reference (ptr);
	  if (!POINTER_TYPE_P (TREE_TYPE (ptr)))
	    ptr = build_fold_addr_expr (ptr);
	  /* C2's size is the byte offset of the section start from the
	     base pointer.  */
	  t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
			       ptrdiff_type_node, t,
			       fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						 ptrdiff_type_node, ptr));
	  OMP_CLAUSE_SIZE (c2) = t;
	  OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
	  OMP_CLAUSE_CHAIN (c) = c2;
	  ptr = OMP_CLAUSE_DECL (c2);
	  if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER
	      && TREE_CODE (TREE_TYPE (ptr)) == REFERENCE_TYPE
	      && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (ptr))))
	    {
	      /* A reference to a pointer needs yet another clause C3
		 for the reference itself.  */
	      tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
					  OMP_CLAUSE_MAP);
	      OMP_CLAUSE_SET_MAP_KIND (c3, OMP_CLAUSE_MAP_KIND (c2));
	      OMP_CLAUSE_DECL (c3) = ptr;
	      if (OMP_CLAUSE_MAP_KIND (c2) == GOMP_MAP_ALWAYS_POINTER)
		OMP_CLAUSE_DECL (c2) = build_simple_mem_ref (ptr);
	      else
		OMP_CLAUSE_DECL (c2) = convert_from_reference (ptr);
	      OMP_CLAUSE_SIZE (c3) = size_zero_node;
	      OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2);
	      OMP_CLAUSE_CHAIN (c2) = c3;
	    }
	}
    }
  return false;
}

/* Return identifier to look up for omp declare reduction.
*/

/* Build the mangled identifier under which a user-defined reduction is
   registered: "omp declare reduction <op-or-name>[~<mangled-type>]".
   REDUCTION_CODE selects a predefined operator (or ERROR_MARK to use
   REDUCTION_ID); TYPE, if non-null, appends its mangling after '~'.
   Returns the identifier, or error_mark_node if REDUCTION_ID is not an
   IDENTIFIER_NODE when one is required.  */

tree
omp_reduction_id (enum tree_code reduction_code, tree reduction_id, tree type)
{
  const char *p = NULL;
  const char *m = NULL;
  switch (reduction_code)
    {
    case PLUS_EXPR:
    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_AND_EXPR:
    case BIT_XOR_EXPR:
    case BIT_IOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Operators map to their overloaded-operator identifier.  */
      reduction_id = ovl_op_identifier (false, reduction_code);
      break;
    case MIN_EXPR:
      p = "min";
      break;
    case MAX_EXPR:
      p = "max";
      break;
    default:
      break;
    }

  if (p == NULL)
    {
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      p = IDENTIFIER_POINTER (reduction_id);
    }

  if (type != NULL_TREE)
    m = mangle_type_string (TYPE_MAIN_VARIANT (type));

  /* LENP is the prefix length including the NUL; if P already carries
     the prefix, collapse it to 1 so the prefix is not duplicated.  */
  const char prefix[] = "omp declare reduction ";
  size_t lenp = sizeof (prefix);
  if (strncmp (p, prefix, lenp - 1) == 0)
    lenp = 1;
  size_t len = strlen (p);
  size_t lenm = m ? strlen (m) + 1 : 0;
  char *name = XALLOCAVEC (char, lenp + len + lenm);
  if (lenp > 1)
    memcpy (name, prefix, lenp - 1);
  memcpy (name + lenp - 1, p, len + 1);
  if (m)
    {
      /* Overwrite the NUL with '~' and append the type mangling.  */
      name[lenp + len - 1] = '~';
      memcpy (name + lenp + len, m, lenm);
    }
  return get_identifier (name);
}

/* Lookup OpenMP UDR ID for TYPE, return the corresponding artificial
   FUNCTION_DECL or NULL_TREE if not found.
*/

static tree
omp_reduction_lookup (location_t loc, tree id, tree type, tree *baselinkp,
		      vec<tree> *ambiguousp)
{
  tree orig_id = id;
  tree baselink = NULL_TREE;
  if (identifier_p (id))
    {
      /* Unqualified name: mangle it for TYPE, do ordinary lookup, and
	 fall back to argument-dependent (Koenig) lookup on TYPE&.  */
      cp_id_kind idk;
      bool nonint_cst_expression_p;
      const char *error_msg;
      id = omp_reduction_id (ERROR_MARK, id, type);
      tree decl = lookup_name (id);
      if (decl == NULL_TREE)
	decl = error_mark_node;
      id = finish_id_expression (id, decl, NULL_TREE, &idk, false, true,
				 &nonint_cst_expression_p, false, true, false,
				 false, &error_msg, loc);
      if (idk == CP_ID_KIND_UNQUALIFIED && identifier_p (id))
	{
	  vec<tree, va_gc> *args = NULL;
	  vec_safe_push (args, build_reference_type (type));
	  id = perform_koenig_lookup (id, args, tf_none);
	}
    }
  else if (TREE_CODE (id) == SCOPE_REF)
    /* Qualified name: look the mangled name up in the given scope.  */
    id = lookup_qualified_name (TREE_OPERAND (id, 0),
				omp_reduction_id (ERROR_MARK,
						  TREE_OPERAND (id, 1),
						  type),
				false, false);
  tree fns = id;
  id = NULL_TREE;
  if (fns && is_overloaded_fn (fns))
    {
      /* Pick the overload whose first argument type matches TYPE.  */
      for (lkp_iterator iter (get_fns (fns)); iter; ++iter)
	{
	  tree fndecl = *iter;
	  if (TREE_CODE (fndecl) == FUNCTION_DECL)
	    {
	      tree argtype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	      if (same_type_p (TREE_TYPE (argtype), type))
		{
		  id = fndecl;
		  break;
		}
	    }
	}
      if (id && BASELINK_P (fns))
	{
	  if (baselinkp)
	    *baselinkp = fns;
	  else
	    baselink = fns;
	}
    }
  if (!id && CLASS_TYPE_P (type) && TYPE_BINFO (type))
    {
      /* Not found directly: recurse into the base classes of TYPE,
	 collecting ambiguous candidates in AMBIGUOUSP.  Only the
	 outermost call (ambiguousp == &ambiguous) diagnoses.  */
      vec<tree> ambiguous = vNULL;
      tree binfo = TYPE_BINFO (type), base_binfo, ret = NULL_TREE;
      unsigned int ix;
      if (ambiguousp == NULL)
	ambiguousp = &ambiguous;
      for (ix = 0; BINFO_BASE_ITERATE (binfo, ix, base_binfo); ix++)
	{
	  id = omp_reduction_lookup (loc, orig_id, BINFO_TYPE (base_binfo),
				     baselinkp ? baselinkp : &baselink,
				     ambiguousp);
	  if (id == NULL_TREE)
	    continue;
	  if (!ambiguousp->is_empty ())
	    ambiguousp->safe_push (id);
	  else if (ret != NULL_TREE)
	    {
	      /* A second distinct hit makes the lookup ambiguous.  */
	      ambiguousp->safe_push (ret);
	      ambiguousp->safe_push (id);
	      ret = NULL_TREE;
	    }
	  else
	    ret = id;
	}
      if (ambiguousp != &ambiguous)
	return ret;
      if (!ambiguous.is_empty ())
	{
	  const char *str = _("candidates are:");
	  unsigned int idx;
	  tree udr;
	  error_at (loc, "user defined reduction lookup is ambiguous");
	  FOR_EACH_VEC_ELT (ambiguous, idx, udr)
	    {
	      inform (DECL_SOURCE_LOCATION (udr), "%s %#qD", str, udr);
	      if (idx == 0)
		str = get_spaces (str);
	    }
	  ambiguous.release ();
	  ret = error_mark_node;
	  baselink = NULL_TREE;
	}
      id = ret;
    }
  if (id && baselink)
    perform_or_defer_access_check (BASELINK_BINFO (baselink),
				   id, id, tf_warning_or_error);
  return id;
}

/* Helper function for cp_parser_omp_declare_reduction_exprs and
   tsubst_omp_udr.  Remove CLEANUP_STMT for data (omp_priv variable).
   Also append INIT_EXPR for DECL_INITIAL of omp_priv after its
   DECL_EXPR.  */

tree
cp_remove_omp_priv_cleanup_stmt (tree *tp, int *walk_subtrees, void *data)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (TREE_CODE (*tp) == CLEANUP_STMT
	   && CLEANUP_DECL (*tp) == (tree) data)
    /* Drop the cleanup, keep its body.  */
    *tp = CLEANUP_BODY (*tp);
  else if (TREE_CODE (*tp) == DECL_EXPR)
    {
      tree decl = DECL_EXPR_DECL (*tp);
      if (!processing_template_decl
	  && decl == (tree) data
	  && DECL_INITIAL (decl)
	  && DECL_INITIAL (decl) != error_mark_node)
	{
	  /* Replace the DECL_EXPR with { DECL_EXPR; INIT_EXPR; } so the
	     initialization is an explicit statement.  */
	  tree list = NULL_TREE;
	  append_to_statement_list_force (*tp, &list);
	  tree init_expr = build2 (INIT_EXPR, void_type_node,
				   decl, DECL_INITIAL (decl));
	  DECL_INITIAL (decl) = NULL_TREE;
	  append_to_statement_list_force (init_expr, &list);
	  *tp = list;
	}
    }
  return NULL_TREE;
}

/* Data passed from cp_check_omp_declare_reduction to
   cp_check_omp_declare_reduction_r.  */

struct cp_check_omp_declare_reduction_data
{
  location_t loc;
  /* Up to 7 statements of the UDR body: combiner decls/stmt first,
     then initializer decls/stmts.  */
  tree stmts[7];
  /* True while checking the combiner, false for the initializer.  */
  bool combiner_p;
};

/* Helper function for cp_check_omp_declare_reduction, called via
   cp_walk_tree.
*/

static tree
cp_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  struct cp_check_omp_declare_reduction_data *udr_data
    = (struct cp_check_omp_declare_reduction_data *) data;
  /* Combiner may only reference omp_out/omp_in (stmts[0]/stmts[1]);
     initializer only omp_priv/omp_orig (stmts[3]/stmts[4]).  */
  if (SSA_VAR_P (*tp)
      && !DECL_ARTIFICIAL (*tp)
      && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 0 : 3])
      && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 1 : 4]))
    {
      location_t loc = udr_data->loc;
      if (udr_data->combiner_p)
	error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
		       "variable %qD which is not %<omp_out%> nor %<omp_in%>",
		  *tp);
      else
	error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
		       "to variable %qD which is not %<omp_priv%> nor "
		       "%<omp_orig%>",
		  *tp);
      /* Returning non-null stops the walk and signals an error.  */
      return *tp;
    }
  return NULL_TREE;
}

/* Diagnose violation of OpenMP #pragma omp declare reduction
   restrictions.  */

void
cp_check_omp_declare_reduction (tree udr)
{
  tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (udr)));
  gcc_assert (TREE_CODE (type) == REFERENCE_TYPE);
  type = TREE_TYPE (type);
  int i;
  location_t loc = DECL_SOURCE_LOCATION (udr);

  if (type == error_mark_node)
    return;
  if (ARITHMETIC_TYPE_P (type))
    {
      /* UDRs must not redeclare the predefined arithmetic reductions;
	 compare UDR's name against each predefined mangled name.  */
      static enum tree_code predef_codes[]
	= { PLUS_EXPR, MULT_EXPR, MINUS_EXPR, BIT_AND_EXPR, BIT_XOR_EXPR,
	    BIT_IOR_EXPR, TRUTH_ANDIF_EXPR, TRUTH_ORIF_EXPR };
      for (i = 0; i < 8; i++)
	{
	  tree id = omp_reduction_id (predef_codes[i], NULL_TREE, NULL_TREE);
	  const char *n1 = IDENTIFIER_POINTER (DECL_NAME (udr));
	  const char *n2 = IDENTIFIER_POINTER (id);
	  if (strncmp (n1, n2, IDENTIFIER_LENGTH (id)) == 0
	      && (n1[IDENTIFIER_LENGTH (id)] == '~'
		  || n1[IDENTIFIER_LENGTH (id)] == '\0'))
	    break;
	}

      /* NOTE(review): COMPLEX_EXPR here looks like it was meant to be
	 COMPLEX_TYPE (TYPE is a type, not an expression) — confirm
	 before changing, as it affects min/max on complex types.  */
      if (i == 8
	  && TREE_CODE (type) != COMPLEX_EXPR)
	{
	  /* Also treat "omp declare reduction min"/"...max" as
	     predefined.  */
	  const char prefix_minmax[] = "omp declare reduction m";
	  size_t prefix_size = sizeof (prefix_minmax) - 1;
	  const char *n = IDENTIFIER_POINTER (DECL_NAME (udr));
	  if (strncmp (IDENTIFIER_POINTER (DECL_NAME (udr)),
		       prefix_minmax, prefix_size) == 0
	      && ((n[prefix_size] == 'i' && n[prefix_size + 1] == 'n')
		  || (n[prefix_size] == 'a' && n[prefix_size + 1] == 'x'))
	      && (n[prefix_size + 2] == '~' || n[prefix_size + 2] == '\0'))
	    i = 0;
	}
      if (i < 8)
	{
	  error_at (loc, "predeclared arithmetic type %qT in "
			 "%<#pragma omp declare reduction%>", type);
	  return;
	}
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE
	   || TREE_CODE (type) == METHOD_TYPE
	   || TREE_CODE (type) == ARRAY_TYPE)
    {
      error_at (loc, "function or array type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return;
    }
  else if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      error_at (loc, "reference type %qT in %<#pragma omp declare reduction%>",
		type);
      return;
    }
  else if (TYPE_QUALS_NO_ADDR_SPACE (type))
    {
      error_at (loc, "const, volatile or __restrict qualified type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return;
    }

  tree body = DECL_SAVED_TREE (udr);
  if (body == NULL_TREE || TREE_CODE (body) != STATEMENT_LIST)
    return;

  /* Collect up to 7 statements: [0]/[1] omp_out/omp_in DECL_EXPRs,
     [2] combiner, [3]/[4] omp_priv/omp_orig DECL_EXPRs,
     [5] initializer, [6] optional extra DECL_EXPR.  */
  tree_stmt_iterator tsi;
  struct cp_check_omp_declare_reduction_data data;
  memset (data.stmts, 0, sizeof data.stmts);
  for (i = 0, tsi = tsi_start (body);
       i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    data.stmts[i] = tsi_stmt (tsi);
  data.loc = loc;
  gcc_assert (tsi_end_p (tsi));
  if (i >= 3)
    {
      gcc_assert (TREE_CODE (data.stmts[0]) == DECL_EXPR
		  && TREE_CODE (data.stmts[1]) == DECL_EXPR);
      /* TREE_NO_WARNING on omp_out marks a UDR already diagnosed; do
	 not emit the same errors twice.  */
      if (TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])))
	return;
      data.combiner_p = true;
      if (cp_walk_tree (&data.stmts[2], cp_check_omp_declare_reduction_r,
			&data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
    }
  if (i >= 6)
    {
      gcc_assert (TREE_CODE (data.stmts[3]) == DECL_EXPR
		  && TREE_CODE (data.stmts[4]) == DECL_EXPR);
      data.combiner_p = false;
      if (cp_walk_tree (&data.stmts[5], cp_check_omp_declare_reduction_r,
			&data, NULL)
	  || cp_walk_tree (&DECL_INITIAL (DECL_EXPR_DECL (data.stmts[3])),
			   cp_check_omp_declare_reduction_r, &data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
      if (i == 7)
	gcc_assert (TREE_CODE (data.stmts[6]) == DECL_EXPR);
    }
}

/* Helper function of finish_omp_clauses.
   Clone STMT as if we were making an inline call.  But, remap
   the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER
   and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv) to DECL.  */

static tree
clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2,
	       tree decl, tree placeholder)
{
  copy_body_data id;
  hash_map<tree, tree> decl_map;

  decl_map.put (omp_decl1, placeholder);
  decl_map.put (omp_decl2, decl);
  /* Set up a minimal copy_body_data as the tree inliner expects:
     source is the UDR's context, destination the current function.  */
  memset (&id, 0, sizeof (id));
  id.src_fn = DECL_CONTEXT (omp_decl1);
  id.dst_fn = current_function_decl;
  id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn);
  id.decl_map = &decl_map;

  id.copy_decl = copy_decl_no_change;
  id.transform_call_graph_edges = CB_CGE_DUPLICATE;
  id.transform_new_cfg = true;
  id.transform_return_to_modify = false;
  id.transform_lang_insert_block = NULL;
  id.eh_lp_nr = 0;
  /* copy_tree_body_r rewrites STMT in place using DECL_MAP.  */
  walk_tree (&stmt, copy_tree_body_r, &id, NULL);
  return stmt;
}

/* Helper function of finish_omp_clauses, called via cp_walk_tree.
   Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP.  */

static tree
find_omp_placeholder_r (tree *tp, int *, void *data)
{
  /* Non-null return stops the walk: the placeholder was found.  */
  if (*tp == (tree) data)
    return *tp;
  return NULL_TREE;
}

/* Helper function of finish_omp_clauses.  Handle OMP_CLAUSE_REDUCTION C.
   Return true if there is some error and the clause should be removed.
*/

static bool
finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor)
{
  tree t = OMP_CLAUSE_DECL (c);
  bool predefined = false;
  if (TREE_CODE (t) == TREE_LIST)
    {
      /* Unresolved array section — only possible in templates.  */
      gcc_assert (processing_template_decl);
      return false;
    }
  tree type = TREE_TYPE (t);
  if (TREE_CODE (t) == MEM_REF)
    type = TREE_TYPE (type);
  if (TREE_CODE (type) == REFERENCE_TYPE)
    type = TREE_TYPE (type);
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      /* Array reduction: rewrite the decl to a MEM_REF over the whole
	 (flattened) array; TYPE becomes the element type.  */
      tree oatype = type;
      gcc_assert (TREE_CODE (t) != MEM_REF);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);
      if (!processing_template_decl)
	{
	  t = require_complete_type (t);
	  if (t == error_mark_node)
	    return true;
	  tree size = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (oatype),
				  TYPE_SIZE_UNIT (type));
	  if (integer_zerop (size))
	    {
	      error ("%qE in %<reduction%> clause is a zero size array",
		     omp_clause_printable_decl (t));
	      return true;
	    }
	  size = size_binop (MINUS_EXPR, size, size_one_node);
	  tree index_type = build_index_type (size);
	  tree atype = build_array_type (type, index_type);
	  tree ptype = build_pointer_type (type);
	  if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
	    t = build_fold_addr_expr (t);
	  t = build2 (MEM_REF, atype, t, build_int_cst (ptype, 0));
	  OMP_CLAUSE_DECL (c) = t;
	}
    }
  if (type == error_mark_node)
    return true;
  else if (ARITHMETIC_TYPE_P (type))
    /* Decide whether the reduction operator is predefined for this
       arithmetic type (no UDR lookup needed).  */
    switch (OMP_CLAUSE_REDUCTION_CODE (c))
      {
      case PLUS_EXPR:
      case MULT_EXPR:
      case MINUS_EXPR:
	predefined = true;
	break;
      case MIN_EXPR:
      case MAX_EXPR:
	if (TREE_CODE (type) == COMPLEX_TYPE)
	  break;
	predefined = true;
	break;
      case BIT_AND_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
	if (FLOAT_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
	  break;
	predefined = true;
	break;
      case TRUTH_ANDIF_EXPR:
      case TRUTH_ORIF_EXPR:
	if (FLOAT_TYPE_P (type))
	  break;
	predefined = true;
	break;
      default:
	break;
      }
  else if (TYPE_READONLY (type))
    {
      error ("%qE has const type for %<reduction%>",
	     omp_clause_printable_decl (t));
      return true;
    }
  else if (!processing_template_decl)
    {
      t = require_complete_type (t);
      if (t == error_mark_node)
	return true;
      OMP_CLAUSE_DECL (c) = t;
    }

  if (predefined)
    {
      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE;
      return false;
    }
  else if (processing_template_decl)
    {
      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == error_mark_node)
	return true;
      return false;
    }

  tree id = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

  /* Look up the user-defined reduction for TYPE and instantiate its
     combiner/initializer with fresh placeholders.  */
  type = TYPE_MAIN_VARIANT (type);
  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE;
  if (id == NULL_TREE)
    id = omp_reduction_id (OMP_CLAUSE_REDUCTION_CODE (c),
			   NULL_TREE, NULL_TREE);
  id = omp_reduction_lookup (OMP_CLAUSE_LOCATION (c), id, type, NULL, NULL);
  if (id)
    {
      if (id == error_mark_node)
	return true;
      mark_used (id);
      tree body = DECL_SAVED_TREE (id);
      if (!body)
	return true;
      if (TREE_CODE (body) == STATEMENT_LIST)
	{
	  /* The UDR body layout (same protocol as
	     cp_check_omp_declare_reduction): [0]/[1] omp_out/omp_in
	     DECL_EXPRs, [2] combiner, [3]/[4] omp_priv/omp_orig
	     DECL_EXPRs, [5] initializer, [6] optional DECL_EXPR.  */
	  tree_stmt_iterator tsi;
	  tree placeholder = NULL_TREE, decl_placeholder = NULL_TREE;
	  int i;
	  tree stmts[7];
	  tree atype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (id)));
	  atype = TREE_TYPE (atype);
	  /* The UDR may be declared on a base class of TYPE.  */
	  bool need_static_cast = !same_type_p (type, atype);
	  memset (stmts, 0, sizeof stmts);
	  for (i = 0, tsi = tsi_start (body);
	       i < 7 && !tsi_end_p (tsi);
	       i++, tsi_next (&tsi))
	    stmts[i] = tsi_stmt (tsi);
	  gcc_assert (tsi_end_p (tsi));

	  if (i >= 3)
	    {
	      gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR
			  && TREE_CODE (stmts[1]) == DECL_EXPR);
	      placeholder = build_lang_decl (VAR_DECL, NULL_TREE, type);
	      DECL_ARTIFICIAL (placeholder) = 1;
	      DECL_IGNORED_P (placeholder) = 1;
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder;
	      if (TREE_CODE (t) == MEM_REF)
		{
		  decl_placeholder = build_lang_decl (VAR_DECL, NULL_TREE,
						      type);
		  DECL_ARTIFICIAL (decl_placeholder) = 1;
		  DECL_IGNORED_P (decl_placeholder) = 1;
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = decl_placeholder;
		}
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[0])))
		cxx_mark_addressable (placeholder);
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[1]))
		  && TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c)))
		     != REFERENCE_TYPE)
		cxx_mark_addressable (decl_placeholder ? decl_placeholder
				      : OMP_CLAUSE_DECL (c));
	      tree omp_out = placeholder;
	      tree omp_in = decl_placeholder ? decl_placeholder
			    : convert_from_reference (OMP_CLAUSE_DECL (c));
	      if (need_static_cast)
		{
		  tree rtype = build_reference_type (atype);
		  omp_out = build_static_cast (rtype, omp_out,
					       tf_warning_or_error);
		  omp_in = build_static_cast (rtype, omp_in,
					      tf_warning_or_error);
		  if (omp_out == error_mark_node || omp_in == error_mark_node)
		    return true;
		  omp_out = convert_from_reference (omp_out);
		  omp_in = convert_from_reference (omp_in);
		}
	      OMP_CLAUSE_REDUCTION_MERGE (c)
		= clone_omp_udr (stmts[2], DECL_EXPR_DECL (stmts[0]),
				 DECL_EXPR_DECL (stmts[1]),
				 omp_in, omp_out);
	    }
	  if (i >= 6)
	    {
	      gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR
			  && TREE_CODE (stmts[4]) == DECL_EXPR);
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[3])))
		cxx_mark_addressable (decl_placeholder ? decl_placeholder
				      : OMP_CLAUSE_DECL (c));
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[4])))
		cxx_mark_addressable (placeholder);
	      tree omp_priv = decl_placeholder ? decl_placeholder
			      : convert_from_reference (OMP_CLAUSE_DECL (c));
	      tree omp_orig = placeholder;
	      if (need_static_cast)
		{
		  if (i == 7)
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"user defined reduction with constructor "
				"initializer for base class %qT", atype);
		      return true;
		    }
		  tree rtype = build_reference_type (atype);
		  omp_priv = build_static_cast (rtype, omp_priv,
						tf_warning_or_error);
		  omp_orig = build_static_cast (rtype, omp_orig,
						tf_warning_or_error);
		  if (omp_priv == error_mark_node
		      || omp_orig == error_mark_node)
		    return true;
		  omp_priv = convert_from_reference (omp_priv);
		  omp_orig = convert_from_reference (omp_orig);
		}
	      if (i == 6)
		*need_default_ctor = true;
	      OMP_CLAUSE_REDUCTION_INIT (c)
		= clone_omp_udr (stmts[5], DECL_EXPR_DECL (stmts[4]),
				 DECL_EXPR_DECL (stmts[3]),
				 omp_priv, omp_orig);
	      /* Record whether the initializer references omp_orig.  */
	      if (cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c),
				find_omp_placeholder_r, placeholder, NULL))
		OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1;
	    }
	  else if (i >= 3)
	    {
	      /* No initializer part: default-construct class types,
		 zero-initialize everything else.  */
	      if (CLASS_TYPE_P (type) && !pod_type_p (type))
		*need_default_ctor = true;
	      else
		{
		  tree init;
		  tree v = decl_placeholder ? decl_placeholder
			   : convert_from_reference (t);
		  if (AGGREGATE_TYPE_P (TREE_TYPE (v)))
		    init = build_constructor (TREE_TYPE (v), NULL);
		  else
		    init = fold_convert (TREE_TYPE (v), integer_zero_node);
		  OMP_CLAUSE_REDUCTION_INIT (c)
		    = build2 (INIT_EXPR, TREE_TYPE (v), v, init);
		}
	    }
	}
    }
  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
    *need_dtor = true;
  else
    {
      error ("user defined reduction not found for %qE",
	     omp_clause_printable_decl (t));
      return true;
    }
  if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
    gcc_assert (TYPE_SIZE_UNIT (type)
		&& TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST);
  return false;
}

/* Called from finish_struct_1.  linear(this) or linear(this:step)
   clauses might not be finalized yet because the class has been
   incomplete when parsing #pragma omp declare simd methods.  Fix
   those up now.
*/

void
finish_omp_declare_simd_methods (tree t)
{
  if (processing_template_decl)
    return;

  /* Scan all methods of T for "omp declare simd" attributes.  */
  for (tree x = TYPE_FIELDS (t); x; x = DECL_CHAIN (x))
    {
      if (TREE_CODE (TREE_TYPE (x)) != METHOD_TYPE)
	continue;
      tree ods = lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (x));
      if (!ods || !TREE_VALUE (ods))
	continue;
      for (tree c = TREE_VALUE (TREE_VALUE (ods)); c; c = OMP_CLAUSE_CHAIN (c))
	/* A linear clause on `this' (decl is integer_zero) with a
	   pointer-typed step: scale the step by sizeof (T), which was
	   unknown while T was incomplete.  */
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	    && integer_zerop (OMP_CLAUSE_DECL (c))
	    && OMP_CLAUSE_LINEAR_STEP (c)
	    && TREE_CODE (TREE_TYPE (OMP_CLAUSE_LINEAR_STEP (c)))
	       == POINTER_TYPE)
	  {
	    tree s = OMP_CLAUSE_LINEAR_STEP (c);
	    s = fold_convert_loc (OMP_CLAUSE_LOCATION (c), sizetype, s);
	    s = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MULT_EXPR,
				 sizetype, s, TYPE_SIZE_UNIT (t));
	    OMP_CLAUSE_LINEAR_STEP (c) = s;
	  }
    }
}

/* Adjust sink depend clause to take into account pointer offsets.

   Return TRUE if there was a problem processing the offset, and the
   whole clause should be removed.  */

static bool
cp_finish_omp_clause_depend_sink (tree sink_clause)
{
  tree t = OMP_CLAUSE_DECL (sink_clause);
  gcc_assert (TREE_CODE (t) == TREE_LIST);

  /* Make sure we don't adjust things twice for templates.  */
  if (processing_template_decl)
    return false;

  for (; t; t = TREE_CHAIN (t))
    {
      tree decl = TREE_VALUE (t);
      if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
	{
	  /* Convert the element offset into a byte offset via pointer
	     arithmetic: (decl +/- |offset|) - decl, preserving the
	     offset's original sign.  */
	  tree offset = TREE_PURPOSE (t);
	  bool neg = wi::neg_p (wi::to_wide (offset));
	  offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
	  decl = mark_rvalue_use (decl);
	  decl = convert_from_reference (decl);
	  tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (sink_clause),
				     neg ? MINUS_EXPR : PLUS_EXPR,
				     decl, offset);
	  t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (sink_clause),
				MINUS_EXPR, sizetype,
				fold_convert (sizetype, t2),
				fold_convert (sizetype, decl));
	  if (t2 == error_mark_node)
	    return true;
	  TREE_PURPOSE (t) = t2;
	}
    }
  return false;
}

/* For all elements of CLAUSES, validate them vs OpenMP constraints.
   Remove any elements from the list that are invalid.
*/ tree finish_omp_clauses (tree clauses, enum c_omp_region_type ort) { bitmap_head generic_head, firstprivate_head, lastprivate_head; bitmap_head aligned_head, map_head, map_field_head, oacc_reduction_head; tree c, t, *pc; tree safelen = NULL_TREE; bool branch_seen = false; bool copyprivate_seen = false; bool ordered_seen = false; bool oacc_async = false; bitmap_obstack_initialize (NULL); bitmap_initialize (&generic_head, &bitmap_default_obstack); bitmap_initialize (&firstprivate_head, &bitmap_default_obstack); bitmap_initialize (&lastprivate_head, &bitmap_default_obstack); bitmap_initialize (&aligned_head, &bitmap_default_obstack); bitmap_initialize (&map_head, &bitmap_default_obstack); bitmap_initialize (&map_field_head, &bitmap_default_obstack); bitmap_initialize (&oacc_reduction_head, &bitmap_default_obstack); if (ort & C_ORT_ACC) for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ASYNC) { oacc_async = true; break; } for (pc = &clauses, c = clauses; c ; c = *pc) { bool remove = false; bool field_ok = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_PRIVATE: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_REDUCTION: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) { remove = true; break; } if (TREE_CODE (t) == TREE_LIST) { while (TREE_CODE (t) == TREE_LIST) t = TREE_CHAIN (t); } else { gcc_assert (TREE_CODE (t) == MEM_REF); t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == POINTER_PLUS_EXPR) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == ADDR_EXPR || INDIRECT_REF_P (t)) t = TREE_OPERAND (t, 0); } tree n = omp_clause_decl_field (t); if (n) t = n; goto check_dup_generic_t; } if (oacc_async) cxx_mark_addressable (t); goto check_dup_generic; case OMP_CLAUSE_COPYPRIVATE: 
copyprivate_seen = true; field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_COPYIN: goto check_dup_generic; case OMP_CLAUSE_LINEAR: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_OMP_DECLARE_SIMD && OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_DEFAULT) { error_at (OMP_CLAUSE_LOCATION (c), "modifier should not be specified in %<linear%> " "clause on %<simd%> or %<for%> constructs"); OMP_CLAUSE_LINEAR_KIND (c) = OMP_CLAUSE_LINEAR_DEFAULT; } if ((VAR_P (t) || TREE_CODE (t) == PARM_DECL) && !type_dependent_expression_p (t)) { tree type = TREE_TYPE (t); if ((OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF || OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_UVAL) && TREE_CODE (type) != REFERENCE_TYPE) { error ("linear clause with %qs modifier applied to " "non-reference variable with %qT type", OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF ? "ref" : "uval", TREE_TYPE (t)); remove = true; break; } if (TREE_CODE (type) == REFERENCE_TYPE) type = TREE_TYPE (type); if (OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_REF) { if (!INTEGRAL_TYPE_P (type) && TREE_CODE (type) != POINTER_TYPE) { error ("linear clause applied to non-integral non-pointer" " variable with %qT type", TREE_TYPE (t)); remove = true; break; } } } t = OMP_CLAUSE_LINEAR_STEP (c); if (t == NULL_TREE) t = integer_one_node; if (t == error_mark_node) { remove = true; break; } else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t)) && (ort != C_ORT_OMP_DECLARE_SIMD || TREE_CODE (t) != PARM_DECL || TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (t))))) { error ("linear step expression must be integral"); remove = true; break; } else { t = mark_rvalue_use (t); if (ort == C_ORT_OMP_DECLARE_SIMD && TREE_CODE (t) == PARM_DECL) { OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) = 1; goto check_dup_generic; } if (!processing_template_decl && (VAR_P 
(OMP_CLAUSE_DECL (c)) || TREE_CODE (OMP_CLAUSE_DECL (c)) == PARM_DECL)) { if (ort == C_ORT_OMP_DECLARE_SIMD) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST) { error_at (OMP_CLAUSE_LOCATION (c), "%<linear%> clause step %qE is neither " "constant nor a parameter", t); remove = true; break; } } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); tree type = TREE_TYPE (OMP_CLAUSE_DECL (c)); if (TREE_CODE (type) == REFERENCE_TYPE) type = TREE_TYPE (type); if (OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF) { type = build_pointer_type (type); tree d = fold_convert (type, OMP_CLAUSE_DECL (c)); t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, d, t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, fold_convert (sizetype, t), fold_convert (sizetype, d)); if (t == error_mark_node) { remove = true; break; } } else if (TREE_CODE (type) == POINTER_TYPE /* Can't multiply the step yet if *this is still incomplete type. */ && (ort != C_ORT_OMP_DECLARE_SIMD || TREE_CODE (OMP_CLAUSE_DECL (c)) != PARM_DECL || !DECL_ARTIFICIAL (OMP_CLAUSE_DECL (c)) || DECL_NAME (OMP_CLAUSE_DECL (c)) != this_identifier || !TYPE_BEING_DEFINED (TREE_TYPE (type)))) { tree d = convert_from_reference (OMP_CLAUSE_DECL (c)); t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, d, t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, fold_convert (sizetype, t), fold_convert (sizetype, d)); if (t == error_mark_node) { remove = true; break; } } else t = fold_convert (type, t); } OMP_CLAUSE_LINEAR_STEP (c) = t; } goto check_dup_generic; check_dup_generic: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) { if (!remove && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); } else t = OMP_CLAUSE_DECL (c); check_dup_generic_t: if (t == current_class_ptr && (ort != C_ORT_OMP_DECLARE_SIMD || (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_UNIFORM))) { error 
("%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && (!field_ok || TREE_CODE (t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error ("%qD is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error ("%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (ort == C_ORT_ACC && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t))) { error ("%qD appears more than once in reduction clauses", t); remove = true; } else bitmap_set_bit (&oacc_reduction_head, DECL_UID (t)); } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE && bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); if (!field_ok) break; handle_field_decl: if (!remove && TREE_CODE (t) == FIELD_DECL && t == OMP_CLAUSE_DECL (c) && ort != C_ORT_ACC) { OMP_CLAUSE_DECL (c) = omp_privatize_field (t, (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)); if (OMP_CLAUSE_DECL (c) == error_mark_node) remove = true; } break; case OMP_CLAUSE_FIRSTPRIVATE: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); else t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_ACC && t == current_class_ptr) { error ("%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP || TREE_CODE 
(t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error ("%qD is not a variable in clause %<firstprivate%>", t); else error ("%qE is not a variable in clause %<firstprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&firstprivate_head, DECL_UID (t)); goto handle_field_decl; case OMP_CLAUSE_LASTPRIVATE: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); else t = OMP_CLAUSE_DECL (c); if (t == current_class_ptr) { error ("%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP || TREE_CODE (t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error ("%qD is not a variable in clause %<lastprivate%>", t); else error ("%qE is not a variable in clause %<lastprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&lastprivate_head, DECL_UID (t)); goto handle_field_decl; case OMP_CLAUSE_IF: t = OMP_CLAUSE_IF_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_IF_EXPR (c) = t; break; case OMP_CLAUSE_FINAL: t = OMP_CLAUSE_FINAL_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; 
else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_FINAL_EXPR (c) = t; break; case OMP_CLAUSE_GANG: /* Operand 1 is the gang static: argument. */ t = OMP_CLAUSE_OPERAND (c, 1); if (t != NULL_TREE) { if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<gang%> static expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1 && t != integer_minus_one_node) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<gang%> static value must be " "positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } } OMP_CLAUSE_OPERAND (c, 1) = t; } /* Check operand 0, the num argument. */ /* FALLTHRU */ case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: if (OMP_CLAUSE_OPERAND (c, 0) == NULL_TREE) break; /* FALLTHRU */ case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_NUM_GANGS: case OMP_CLAUSE_NUM_WORKERS: case OMP_CLAUSE_VECTOR_LENGTH: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_GANG: error_at (OMP_CLAUSE_LOCATION (c), "%<gang%> num expression must be integral"); break; case OMP_CLAUSE_VECTOR: error_at (OMP_CLAUSE_LOCATION (c), "%<vector%> length expression must be integral"); break; case OMP_CLAUSE_WORKER: error_at (OMP_CLAUSE_LOCATION (c), "%<worker%> num expression must be integral"); break; default: error_at (OMP_CLAUSE_LOCATION (c), "%qs expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); } remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { 
switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_GANG: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<gang%> num value must be positive"); break; case OMP_CLAUSE_VECTOR: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<vector%> length value must be " "positive"); break; case OMP_CLAUSE_WORKER: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<worker%> num value must be " "positive"); break; default: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%qs value must be positive", omp_clause_code_name [OMP_CLAUSE_CODE (c)]); } t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_OPERAND (c, 0) = t; } break; case OMP_CLAUSE_SCHEDULE: if (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) { const char *p = NULL; switch (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK) { case OMP_CLAUSE_SCHEDULE_STATIC: p = "static"; break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: break; case OMP_CLAUSE_SCHEDULE_GUIDED: break; case OMP_CLAUSE_SCHEDULE_AUTO: p = "auto"; break; case OMP_CLAUSE_SCHEDULE_RUNTIME: p = "runtime"; break; default: gcc_unreachable (); } if (p) { error_at (OMP_CLAUSE_LOCATION (c), "%<nonmonotonic%> modifier specified for %qs " "schedule kind", p); OMP_CLAUSE_SCHEDULE_KIND (c) = (enum omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (c) & ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC); } } t = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("schedule chunk size expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "chunk size value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_SIMDLEN: case 
OMP_CLAUSE_SAFELEN: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%qs length expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error ("%qs length expression must be positive constant" " integer expression", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } OMP_CLAUSE_OPERAND (c, 0) = t; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SAFELEN) safelen = c; } break; case OMP_CLAUSE_ASYNC: t = OMP_CLAUSE_ASYNC_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<async%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_ASYNC_EXPR (c) = t; } break; case OMP_CLAUSE_WAIT: t = OMP_CLAUSE_WAIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_WAIT_EXPR (c) = t; break; case OMP_CLAUSE_THREAD_LIMIT: t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<thread_limit%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<thread_limit%> value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t; } break; case OMP_CLAUSE_DEVICE: t = OMP_CLAUSE_DEVICE_ID (c); if (t 
== error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<device%> id must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DEVICE_ID (c) = t; } break; case OMP_CLAUSE_DIST_SCHEDULE: t = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<dist_schedule%> chunk size expression must be " "integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_ALIGNED: t = OMP_CLAUSE_DECL (c); if (t == current_class_ptr && ort != C_ORT_OMP_DECLARE_SIMD) { error ("%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error ("%qD is not a variable in %<aligned%> clause", t); else error ("%qE is not a variable in %<aligned%> clause", t); remove = true; } else if (!type_dependent_expression_p (t) && TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE && TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE && (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE || (!POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (t))) && (TREE_CODE (TREE_TYPE (TREE_TYPE (t))) != ARRAY_TYPE)))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE in %<aligned%> clause is neither a pointer nor " "an array nor a reference to pointer or array", t); remove = true; } else if (bitmap_bit_p (&aligned_head, DECL_UID (t))) { error ("%qD appears more than once in %<aligned%> clauses", t); remove = true; } else bitmap_set_bit (&aligned_head, DECL_UID (t)); t = OMP_CLAUSE_ALIGNED_ALIGNMENT (c); if (t == error_mark_node) remove = 
true; else if (t == NULL_TREE) break; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<aligned%> clause alignment expression must " "be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error ("%<aligned%> clause alignment expression must be " "positive constant integer expression"); remove = true; } } OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = t; } break; case OMP_CLAUSE_DEPEND: t = OMP_CLAUSE_DECL (c); if (t == NULL_TREE) { gcc_assert (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE); break; } if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK) { if (cp_finish_omp_clause_depend_sink (c)) remove = true; break; } if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) remove = true; break; } if (t == error_mark_node) remove = true; else if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error ("%qD is not a variable in %<depend%> clause", t); else error ("%qE is not a variable in %<depend%> clause", t); remove = true; } else if (t == current_class_ptr) { error ("%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; } else if (!processing_template_decl && !cxx_mark_addressable (t)) remove = true; break; case OMP_CLAUSE_MAP: case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case OMP_CLAUSE__CACHE_: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) remove = true; else { t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != TREE_LIST && !type_dependent_expression_p (t) && !cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "array section does not have mappable type " "in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } while (TREE_CODE (t) == ARRAY_REF) t = TREE_OPERAND (t, 0); if 
(TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { while (TREE_CODE (t) == COMPONENT_REF) t = TREE_OPERAND (t, 0); if (REFERENCE_REF_P (t)) t = TREE_OPERAND (t, 0); if (bitmap_bit_p (&map_field_head, DECL_UID (t))) break; if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error ("%qD appears more than once in motion" " clauses", t); else if (ort == C_ORT_ACC) error ("%qD appears more than once in data" " clauses", t); else error ("%qD appears more than once in map" " clauses", t); remove = true; } else { bitmap_set_bit (&map_head, DECL_UID (t)); bitmap_set_bit (&map_field_head, DECL_UID (t)); } } } break; } if (t == error_mark_node) { remove = true; break; } if (REFERENCE_REF_P (t) && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF) { t = TREE_OPERAND (t, 0); OMP_CLAUSE_DECL (c) = t; } if (TREE_CODE (t) == COMPONENT_REF && (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP && OMP_CLAUSE_CODE (c) != OMP_CLAUSE__CACHE_) { if (type_dependent_expression_p (t)) break; if (TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL && DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { error_at (OMP_CLAUSE_LOCATION (c), "bit-field %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } while (TREE_CODE (t) == COMPONENT_REF) { if (TREE_TYPE (TREE_OPERAND (t, 0)) && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is a member of a union", t); remove = true; break; } t = TREE_OPERAND (t, 0); } if (remove) break; if (REFERENCE_REF_P (t)) t = TREE_OPERAND (t, 0); if (VAR_P (t) || TREE_CODE (t) == PARM_DECL) { if (bitmap_bit_p (&map_field_head, DECL_UID (t))) goto handle_map_references; } } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl 
&& TREE_CODE (t) != OVERLOAD) break; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER)) break; if (DECL_P (t)) error ("%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error ("%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t)) { error ("%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (ort != C_ORT_ACC && t == current_class_ptr) { error ("%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } else if (!processing_template_decl && TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE && (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP || (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER)) && !cxx_mark_addressable (t)) remove = true; else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER || (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER))) && t == OMP_CLAUSE_DECL (c) && !type_dependent_expression_p (t) && !cp_omp_mappable_type ((TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE) ? 
TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FORCE_DEVICEPTR && !type_dependent_expression_p (t) && !POINTER_TYPE_P (TREE_TYPE (t))) { error ("%qD is not a pointer variable", t); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER) { if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error ("%qD appears more than once in motion clauses", t); if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears more than once in map clauses", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error ("%qD appears more than once in data clauses", t); else error ("%qD appears both in data and map clauses", t); remove = true; } else { bitmap_set_bit (&map_head, DECL_UID (t)); if (t != OMP_CLAUSE_DECL (c) && TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF) bitmap_set_bit (&map_field_head, DECL_UID (t)); } handle_map_references: if (!remove && !processing_template_decl && (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP && TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == REFERENCE_TYPE) { t = OMP_CLAUSE_DECL (c); if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) { OMP_CLAUSE_DECL (c) 
= build_simple_mem_ref (t); if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))); } else if (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER && (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_REFERENCE) && (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_POINTER)) { tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); if (TREE_CODE (t) == COMPONENT_REF) OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALWAYS_POINTER); else OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_REFERENCE); OMP_CLAUSE_DECL (c2) = t; OMP_CLAUSE_SIZE (c2) = size_zero_node; OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = c2; OMP_CLAUSE_DECL (c) = build_simple_mem_ref (t); if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))); c = c2; } } break; case OMP_CLAUSE_TO_DECLARE: case OMP_CLAUSE_LINK: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == FUNCTION_DECL && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE) ; else if (!VAR_P (t)) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE) { if (TREE_CODE (t) == TEMPLATE_ID_EXPR) error_at (OMP_CLAUSE_LOCATION (c), "template %qE in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else if (really_overloaded_fn (t)) error_at (OMP_CLAUSE_LOCATION (c), "overloaded function name %qE in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is neither a variable nor a function name " "in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); } else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a 
mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } if (remove) break; if (bitmap_bit_p (&generic_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once on the same " "%<declare target%> directive", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_UNIFORM: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not an argument in %<uniform%> clause", t); else error ("%qE is not an argument in %<uniform%> clause", t); remove = true; break; } /* map_head bitmap is used as uniform_head if declare_simd. */ bitmap_set_bit (&map_head, DECL_UID (t)); goto check_dup_generic; case OMP_CLAUSE_GRAINSIZE: t = OMP_CLAUSE_GRAINSIZE_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<grainsize%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<grainsize%> value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_GRAINSIZE_EXPR (c) = t; } break; case OMP_CLAUSE_PRIORITY: t = OMP_CLAUSE_PRIORITY_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<priority%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) == -1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<priority%> value must be non-negative"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_PRIORITY_EXPR (c) = t; } 
break; case OMP_CLAUSE_HINT: t = OMP_CLAUSE_HINT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<num_tasks%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_HINT_EXPR (c) = t; } break; case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_PTR: field_ok = (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP; t = OMP_CLAUSE_DECL (c); if (!type_dependent_expression_p (t)) { tree type = TREE_TYPE (t); if (TREE_CODE (type) != POINTER_TYPE && TREE_CODE (type) != ARRAY_TYPE && (TREE_CODE (type) != REFERENCE_TYPE || (TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE && TREE_CODE (TREE_TYPE (type)) != ARRAY_TYPE))) { error_at (OMP_CLAUSE_LOCATION (c), "%qs variable is neither a pointer, nor an array " "nor reference to pointer or array", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } goto check_dup_generic; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_FOR: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_THREADS: case OMP_CLAUSE_SIMD: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_SEQ: break; case OMP_CLAUSE_TILE: for (tree list = OMP_CLAUSE_TILE_LIST (c); !remove && list; list = TREE_CHAIN (list)) { t = TREE_VALUE (list); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<tile%> argument needs integral type"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { /* Zero is used to indicate '*', we permit you to get there via an ICE of value 
zero. */ t = maybe_constant_value (t); if (!tree_fits_shwi_p (t) || tree_to_shwi (t) < 0) { error_at (OMP_CLAUSE_LOCATION (c), "%<tile%> argument needs positive " "integral constant"); remove = true; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } } /* Update list item. */ TREE_VALUE (list) = t; } break; case OMP_CLAUSE_ORDERED: ordered_seen = true; break; case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: if (branch_seen) { error ("%<inbranch%> clause is incompatible with " "%<notinbranch%>"); remove = true; } branch_seen = true; break; default: gcc_unreachable (); } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } for (pc = &clauses, c = clauses; c ; c = *pc) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); bool remove = false; bool need_complete_type = false; bool need_default_ctor = false; bool need_copy_ctor = false; bool need_copy_assignment = false; bool need_implicitly_determined = false; bool need_dtor = false; tree type, inner_type; switch (c_kind) { case OMP_CLAUSE_SHARED: need_implicitly_determined = true; break; case OMP_CLAUSE_PRIVATE: need_complete_type = true; need_default_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_FIRSTPRIVATE: need_complete_type = true; need_copy_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_LASTPRIVATE: need_complete_type = true; need_copy_assignment = true; need_implicitly_determined = true; break; case OMP_CLAUSE_REDUCTION: need_implicitly_determined = true; break; case OMP_CLAUSE_LINEAR: if (ort != C_ORT_OMP_DECLARE_SIMD) need_implicitly_determined = true; else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) && !bitmap_bit_p (&map_head, DECL_UID (OMP_CLAUSE_LINEAR_STEP (c)))) { error_at (OMP_CLAUSE_LOCATION (c), "%<linear%> clause step is a parameter %qD not " "specified in %<uniform%> clause", OMP_CLAUSE_LINEAR_STEP (c)); *pc = OMP_CLAUSE_CHAIN (c); continue; } break; case OMP_CLAUSE_COPYPRIVATE: 
need_copy_assignment = true; break; case OMP_CLAUSE_COPYIN: need_copy_assignment = true; break; case OMP_CLAUSE_SIMDLEN: if (safelen && !processing_template_decl && tree_int_cst_lt (OMP_CLAUSE_SAFELEN_EXPR (safelen), OMP_CLAUSE_SIMDLEN_EXPR (c))) { error_at (OMP_CLAUSE_LOCATION (c), "%<simdlen%> clause value is bigger than " "%<safelen%> clause value"); OMP_CLAUSE_SIMDLEN_EXPR (c) = OMP_CLAUSE_SAFELEN_EXPR (safelen); } pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_SCHEDULE: if (ordered_seen && (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)) { error_at (OMP_CLAUSE_LOCATION (c), "%<nonmonotonic%> schedule modifier specified " "together with %<ordered%> clause"); OMP_CLAUSE_SCHEDULE_KIND (c) = (enum omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (c) & ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC); } pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_NOWAIT: if (copyprivate_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nowait%> clause must not be used together " "with %<copyprivate%>"); *pc = OMP_CLAUSE_CHAIN (c); continue; } /* FALLTHRU */ default: pc = &OMP_CLAUSE_CHAIN (c); continue; } t = OMP_CLAUSE_DECL (c); if (processing_template_decl && !VAR_P (t) && TREE_CODE (t) != PARM_DECL) { pc = &OMP_CLAUSE_CHAIN (c); continue; } switch (c_kind) { case OMP_CLAUSE_LASTPRIVATE: if (!bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { need_default_ctor = true; need_dtor = true; } break; case OMP_CLAUSE_REDUCTION: if (finish_omp_reduction_clause (c, &need_default_ctor, &need_dtor)) remove = true; else t = OMP_CLAUSE_DECL (c); break; case OMP_CLAUSE_COPYIN: if (!VAR_P (t) || !CP_DECL_THREAD_LOCAL_P (t)) { error ("%qE must be %<threadprivate%> for %<copyin%>", t); remove = true; } break; default: break; } if (need_complete_type || need_copy_assignment) { t = require_complete_type (t); if (t == error_mark_node) remove = true; else if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE && !complete_type_or_else (TREE_TYPE (TREE_TYPE (t)), t)) remove = true; } if 
(need_implicitly_determined) { const char *share_name = NULL; if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t)) share_name = "threadprivate"; else switch (cxx_omp_predetermined_sharing (t)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: /* const vars may be specified in firstprivate clause. */ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && cxx_omp_const_qual_no_mutable (t)) break; share_name = "shared"; break; case OMP_CLAUSE_DEFAULT_PRIVATE: share_name = "private"; break; default: gcc_unreachable (); } if (share_name) { error ("%qE is predetermined %qs for %qs", omp_clause_printable_decl (t), share_name, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } /* We're interested in the base element, not arrays. */ inner_type = type = TREE_TYPE (t); if ((need_complete_type || need_copy_assignment || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) && TREE_CODE (inner_type) == REFERENCE_TYPE) inner_type = TREE_TYPE (inner_type); while (TREE_CODE (inner_type) == ARRAY_TYPE) inner_type = TREE_TYPE (inner_type); /* Check for special function availability by building a call to one. Save the results, because later we won't be in the right context for making these queries. */ if (CLASS_TYPE_P (inner_type) && COMPLETE_TYPE_P (inner_type) && (need_default_ctor || need_copy_ctor || need_copy_assignment || need_dtor) && !type_dependent_expression_p (t) && cxx_omp_create_clause_info (c, inner_type, need_default_ctor, need_copy_ctor, need_copy_assignment, need_dtor)) remove = true; if (!remove && c_kind == OMP_CLAUSE_SHARED && processing_template_decl) { t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) OMP_CLAUSE_DECL (c) = t; } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } bitmap_obstack_release (NULL); return clauses; } /* Start processing OpenMP clauses that can include any privatization clauses for non-static data members. 
*/

/* Begin a region in which references to non-static data members may be
   remapped for OpenMP privatization.  Returns a statement list token to be
   handed back to pop_omp_privatization_clauses, or NULL_TREE when the
   previous call asked for this one to be ignored.  IGNORE_NEXT requests
   that the *next* push be a no-op.  If a remapping map already exists, an
   error_mark_node sentinel is pushed onto omp_private_member_vec so the
   matching pop knows where this region's entries start.  */

tree
push_omp_privatization_clauses (bool ignore_next)
{
  if (omp_private_member_ignore_next)
    {
      omp_private_member_ignore_next = ignore_next;
      return NULL_TREE;
    }
  omp_private_member_ignore_next = ignore_next;
  if (omp_private_member_map)
    omp_private_member_vec.safe_push (error_mark_node);
  return push_stmt_list ();
}

/* Revert remapping of any non-static data members since
   the last push_omp_privatization_clauses () call.
   STMT is the token returned by the matching push (NULL_TREE if that push
   was ignored).  Entries are popped from omp_private_member_vec until the
   error_mark_node sentinel (or the vec is exhausted); an integer_zero_node
   marker preceding an entry means "no DECL_EXPR was emitted for it", so
   add_decl_expr is skipped for that one.  */

void
pop_omp_privatization_clauses (tree stmt)
{
  if (stmt == NULL_TREE)
    return;
  stmt = pop_stmt_list (stmt);
  if (omp_private_member_map)
    {
      while (!omp_private_member_vec.is_empty ())
	{
	  tree t = omp_private_member_vec.pop ();
	  if (t == error_mark_node)
	    {
	      /* Sentinel: the remaining entries belong to an outer
		 region; leave them (and the map) alone.  */
	      add_stmt (stmt);
	      return;
	    }
	  bool no_decl_expr = t == integer_zero_node;
	  if (no_decl_expr)
	    t = omp_private_member_vec.pop ();
	  tree *v = omp_private_member_map->get (t);
	  gcc_assert (v);
	  if (!no_decl_expr)
	    add_decl_expr (*v);
	  omp_private_member_map->remove (t);
	}
      delete omp_private_member_map;
      omp_private_member_map = NULL;
    }
  add_stmt (stmt);
}

/* Remember OpenMP privatization clauses mapping and clear it.
   Used for lambdas.  The current vec/map state is flattened into SAVE:
   for each remapped member the mapped value, the key, and (when distinct)
   the integer_zero_node marker are pushed, so restore_ below can rebuild
   both structures; a leading integer_one_node records
   omp_private_member_ignore_next.  */

void
save_omp_privatization_clauses (vec<tree> &save)
{
  save = vNULL;
  if (omp_private_member_ignore_next)
    save.safe_push (integer_one_node);
  omp_private_member_ignore_next = false;
  if (!omp_private_member_map)
    return;
  while (!omp_private_member_vec.is_empty ())
    {
      tree t = omp_private_member_vec.pop ();
      if (t == error_mark_node)
	{
	  /* Region sentinel — preserved verbatim.  */
	  save.safe_push (t);
	  continue;
	}
      tree n = t;
      if (t == integer_zero_node)
	t = omp_private_member_vec.pop ();
      tree *v = omp_private_member_map->get (t);
      gcc_assert (v);
      save.safe_push (*v);
      save.safe_push (t);
      if (n != t)
	save.safe_push (n);
    }
  delete omp_private_member_map;
  omp_private_member_map = NULL;
}

/* Restore OpenMP privatization clauses mapping saved by the above
   function.
*/

/* Generate an OpenMP/OpenACC construct of kind CODE (one whose operands
   are accessed through OMP_BODY/OMP_CLAUSES), closing the structured
   block BODY and attaching CLAUSES.  Returns the added statement.  */

tree
finish_omp_construct (enum tree_code code, tree body, tree clauses)
{
  body = finish_omp_structured_block (body);

  tree stmt = make_node (code);
  TREE_TYPE (stmt) = void_type_node;
  OMP_BODY (stmt) = body;
  OMP_CLAUSES (stmt) = clauses;

  return add_stmt (stmt);
}

/* Generate OMP_PARALLEL, with CLAUSES and the structured block BODY
   (begun by begin_omp_parallel) as its operands.  */

tree
finish_omp_parallel (tree clauses, tree body)
{
  tree stmt;

  body = finish_omp_structured_block (body);

  stmt = make_node (OMP_PARALLEL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_PARALLEL_CLAUSES (stmt) = clauses;
  OMP_PARALLEL_BODY (stmt) = body;

  return add_stmt (stmt);
}

/* Begin the structured block of an OpenMP task construct, forcing
   retention of the BLOCK (like begin_omp_parallel).  */

tree
begin_omp_task (void)
{
  keep_next_level (true);
  return begin_omp_structured_block ();
}

/* Generate OMP_TASK, with CLAUSES and the structured block BODY
   (begun by begin_omp_task) as its operands.  */

tree
finish_omp_task (tree clauses, tree body)
{
  tree stmt;

  body = finish_omp_structured_block (body);

  stmt = make_node (OMP_TASK);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TASK_CLAUSES (stmt) = clauses;
  OMP_TASK_BODY (stmt) = body;

  return add_stmt (stmt);
}

/* Helper function for finish_omp_for.  Convert Ith random access iterator
   into integral iterator.  Return FALSE if successful.
*/

static bool
handle_omp_for_class_iterator (int i, location_t locus, enum tree_code code,
			       tree declv, tree orig_declv, tree initv,
			       tree condv, tree incrv, tree *body,
			       tree *pre_body, tree &clauses, tree *lastp,
			       int collapse, int ordered)
{
  tree diff, iter_init, iter_incr = NULL, last;
  tree incr_var = NULL, orig_pre_body, orig_body, c;
  tree decl = TREE_VEC_ELT (declv, i);
  tree init = TREE_VEC_ELT (initv, i);
  tree cond = TREE_VEC_ELT (condv, i);
  tree incr = TREE_VEC_ELT (incrv, i);
  tree iter = decl;
  location_t elocus = locus;

  if (init && EXPR_HAS_LOCATION (init))
    elocus = EXPR_LOCATION (init);

  /* Validate the controlling predicate; canonicalize it so the iterator
     is the first operand, and verify iter OP bound is buildable.  */
  cond = cp_fully_fold (cond);
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      if (TREE_OPERAND (cond, 1) == iter)
	cond = build2 (swap_tree_comparison (TREE_CODE (cond)),
		       TREE_TYPE (cond), iter, TREE_OPERAND (cond, 0));
      if (TREE_OPERAND (cond, 0) != iter)
	cond = error_mark_node;
      else
	{
	  tree tem = build_x_binary_op (EXPR_LOCATION (cond),
					TREE_CODE (cond),
					iter, ERROR_MARK,
					TREE_OPERAND (cond, 1), ERROR_MARK,
					NULL, tf_warning_or_error);
	  if (error_operand_p (tem))
	    return true;
	}
      break;
    default:
      cond = error_mark_node;
      break;
    }
  if (cond == error_mark_node)
    {
      error_at (elocus, "invalid controlling predicate");
      return true;
    }
  /* The integral loop counter iterates over bound - iter; that
     difference must have integer type.  */
  diff = build_x_binary_op (elocus, MINUS_EXPR, TREE_OPERAND (cond, 1),
			    ERROR_MARK, iter, ERROR_MARK, NULL,
			    tf_warning_or_error);
  diff = cp_fully_fold (diff);
  if (error_operand_p (diff))
    return true;
  if (TREE_CODE (TREE_TYPE (diff)) != INTEGER_TYPE)
    {
      error_at (elocus,
		"difference between %qE and %qD does not have integer type",
		TREE_OPERAND (cond, 1), iter);
      return true;
    }
  if (!c_omp_check_loop_iv_exprs (locus, orig_declv,
				  TREE_VEC_ELT (declv, i), NULL_TREE,
				  cond, cp_walk_subtrees))
    return true;

  /* Validate the increment; normalize it to an integral step INCR and,
     when it can be prebuilt, an expression ITER_INCR that bumps the
     class iterator itself.  */
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      if (TREE_OPERAND (incr, 0) != iter)
	{
	  incr = error_mark_node;
	  break;
	}
      iter_incr = build_x_unary_op (EXPR_LOCATION (incr),
				    TREE_CODE (incr), iter,
				    tf_warning_or_error);
      if (error_operand_p (iter_incr))
	return true;
      else if (TREE_CODE (incr) == PREINCREMENT_EXPR
	       || TREE_CODE (incr) == POSTINCREMENT_EXPR)
	incr = integer_one_node;
      else
	incr = integer_minus_one_node;
      break;
    case MODIFY_EXPR:
      if (TREE_OPERAND (incr, 0) != iter)
	incr = error_mark_node;
      else if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	       || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree rhs = TREE_OPERAND (incr, 1);
	  if (TREE_OPERAND (rhs, 0) == iter)
	    {
	      /* iter = iter +/- step.  */
	      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 1)))
		  != INTEGER_TYPE)
		incr = error_mark_node;
	      else
		{
		  iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs),
						   iter, TREE_CODE (rhs),
						   TREE_OPERAND (rhs, 1),
						   tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  incr = TREE_OPERAND (rhs, 1);
		  incr = cp_convert (TREE_TYPE (diff), incr,
				     tf_warning_or_error);
		  if (TREE_CODE (rhs) == MINUS_EXPR)
		    {
		      incr = build1 (NEGATE_EXPR, TREE_TYPE (diff), incr);
		      incr = fold_simple (incr);
		    }
		  /* Only keep the prebuilt increment if the step folded
		     to a constant.  */
		  if (TREE_CODE (incr) != INTEGER_CST
		      && (TREE_CODE (incr) != NOP_EXPR
			  || (TREE_CODE (TREE_OPERAND (incr, 0))
			      != INTEGER_CST)))
		    iter_incr = NULL;
		}
	    }
	  else if (TREE_OPERAND (rhs, 1) == iter)
	    {
	      /* iter = step + iter; only PLUS_EXPR commutes.  */
	      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 0))) != INTEGER_TYPE
		  || TREE_CODE (rhs) != PLUS_EXPR)
		incr = error_mark_node;
	      else
		{
		  iter_incr = build_x_binary_op (EXPR_LOCATION (rhs),
						 PLUS_EXPR,
						 TREE_OPERAND (rhs, 0),
						 ERROR_MARK, iter,
						 ERROR_MARK, NULL,
						 tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs),
						   iter, NOP_EXPR,
						   iter_incr,
						   tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  incr = TREE_OPERAND (rhs, 0);
		  iter_incr = NULL;
		}
	    }
	  else
	    incr = error_mark_node;
	}
      else
	incr = error_mark_node;
      break;
    default:
      incr = error_mark_node;
      break;
    }

  if (incr == error_mark_node)
    {
      error_at (elocus, "invalid increment expression");
      return true;
    }

  incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error);
  /* Look for a lastprivate (or, for taskloop, private) clause naming the
     iterator; C ends up pointing at the lastprivate clause if any.  */
  bool taskloop_iv_seen = false;
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	&& OMP_CLAUSE_DECL (c) == iter)
      {
	if (code == OMP_TASKLOOP)
	  {
	    taskloop_iv_seen = true;
	    OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV (c) = 1;
	  }
	break;
      }
    else if (code == OMP_TASKLOOP
	     && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
	     && OMP_CLAUSE_DECL (c) == iter)
      {
	taskloop_iv_seen = true;
	OMP_CLAUSE_PRIVATE_TASKLOOP_IV (c) = 1;
      }

  /* DECL becomes the replacement integral loop counter, LAST tracks the
     counter value already applied to the class iterator.  */
  decl = create_temporary_var (TREE_TYPE (diff));
  pushdecl (decl);
  add_decl_expr (decl);
  last = create_temporary_var (TREE_TYPE (diff));
  pushdecl (last);
  add_decl_expr (last);
  if (c && iter_incr == NULL && TREE_CODE (incr) != INTEGER_CST
      && (!ordered || (i < collapse && collapse > 1)))
    {
      /* Non-constant step with lastprivate: evaluate the step once into
	 a temporary.  */
      incr_var = create_temporary_var (TREE_TYPE (diff));
      pushdecl (incr_var);
      add_decl_expr (incr_var);
    }
  gcc_assert (stmts_are_full_exprs_p ());
  tree diffvar = NULL_TREE;
  if (code == OMP_TASKLOOP)
    {
      if (!taskloop_iv_seen)
	{
	  /* Taskloop needs the iterator firstprivatized if no explicit
	     data-sharing clause mentioned it.  */
	  tree ivc = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (ivc) = iter;
	  cxx_omp_finish_clause (ivc, NULL);
	  OMP_CLAUSE_CHAIN (ivc) = clauses;
	  clauses = ivc;
	}
      tree lvc = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE);
      OMP_CLAUSE_DECL (lvc) = last;
      OMP_CLAUSE_CHAIN (lvc) = clauses;
      clauses = lvc;
      diffvar = create_temporary_var (TREE_TYPE (diff));
      pushdecl (diffvar);
      add_decl_expr (diffvar);
    }

  /* Build the pre-body: run the original pre-body, initialize the class
     iterator, then set up the integral counter range [0, diff).  */
  orig_pre_body = *pre_body;
  *pre_body = push_stmt_list ();
  if (orig_pre_body)
    add_stmt (orig_pre_body);
  if (init != NULL)
    finish_expr_stmt (build_x_modify_expr (elocus,
					   iter, NOP_EXPR, init,
					   tf_warning_or_error));
  init = build_int_cst (TREE_TYPE (diff), 0);
  if (c && iter_incr == NULL
      && (!ordered || (i < collapse && collapse > 1)))
    {
      if (incr_var)
	{
	  finish_expr_stmt (build_x_modify_expr (elocus,
						 incr_var, NOP_EXPR,
						 incr, tf_warning_or_error));
	  incr = incr_var;
	}
      iter_incr = build_x_modify_expr (elocus,
				       iter, PLUS_EXPR, incr,
				       tf_warning_or_error);
    }
  if (c && ordered && i < collapse && collapse > 1)
    iter_incr = incr;
  finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, init,
					 tf_warning_or_error));
  if (diffvar)
    {
      finish_expr_stmt (build_x_modify_expr (elocus, diffvar, NOP_EXPR,
					     diff, tf_warning_or_error));
      diff = diffvar;
    }
  *pre_body = pop_stmt_list (*pre_body);

  cond = cp_build_binary_op (elocus, TREE_CODE (cond), decl, diff,
			     tf_warning_or_error);
  incr = build_modify_expr (elocus, decl, NULL_TREE, PLUS_EXPR,
			    elocus, incr, NULL_TREE);

  /* Build the body: advance the class iterator by (decl - last), record
     the new position in LAST, then run the original body.  */
  orig_body = *body;
  *body = push_stmt_list ();
  iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), decl, last);
  iter_init = build_x_modify_expr (elocus,
				   iter, PLUS_EXPR, iter_init,
				   tf_warning_or_error);
  if (iter_init != error_mark_node)
    iter_init = build1 (NOP_EXPR, void_type_node, iter_init);
  finish_expr_stmt (iter_init);
  finish_expr_stmt (build_x_modify_expr (elocus,
					 last, NOP_EXPR, decl,
					 tf_warning_or_error));
  add_stmt (orig_body);
  *body = pop_stmt_list (*body);

  if (c)
    {
      /* For lastprivate, store the statements that bring the class
	 iterator to its final position on the clause.  */
      OMP_CLAUSE_LASTPRIVATE_STMT (c) = push_stmt_list ();
      if (!ordered)
	finish_expr_stmt (iter_incr);
      else
	{
	  iter_init = decl;
	  if (i < collapse && collapse > 1 && !error_operand_p (iter_incr))
	    iter_init = build2 (PLUS_EXPR, TREE_TYPE (diff),
				iter_init, iter_incr);
	  iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), iter_init, last);
	  iter_init = build_x_modify_expr (elocus,
					   iter, PLUS_EXPR, iter_init,
					   tf_warning_or_error);
	  if (iter_init != error_mark_node)
	    iter_init = build1 (NOP_EXPR, void_type_node, iter_init);
	  finish_expr_stmt (iter_init);
	}
      OMP_CLAUSE_LASTPRIVATE_STMT (c)
	= pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (c));
    }

  /* Publish the rewritten, integral loop descriptors.  */
  TREE_VEC_ELT (declv, i) = decl;
  TREE_VEC_ELT (initv, i) = init;
  TREE_VEC_ELT (condv, i) = cond;
  TREE_VEC_ELT (incrv, i) = incr;
  *lastp = last;

  return false;
}

/* Build and validate an OMP_FOR statement.  CLAUSES, BODY, COND, INCR
   are directly for their associated operands in the statement.  DECL
   and INIT are a combo; if DECL is NULL then INIT ought to be a
   MODIFY_EXPR, and the DECL should be extracted.
   PRE_BODY are optional statements that need to go before the loop
   into its sk_omp scope.  */

tree
finish_omp_for (location_t locus, enum tree_code code, tree declv,
		tree orig_declv, tree initv, tree condv, tree incrv,
		tree body, tree pre_body, vec<tree> *orig_inits, tree clauses)
{
  tree omp_for = NULL, orig_incr = NULL;
  tree decl = NULL, init, cond, incr;
  tree last = NULL_TREE;
  location_t elocus;
  int i;
  int collapse = 1;
  int ordered = 0;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  if (TREE_VEC_LENGTH (declv) > 1)
    {
      tree c;

      /* Determine how many of the nested loops are collapsed; any beyond
	 COLLAPSE are there because of an ordered(n) clause.  */
      c = omp_find_clause (clauses, OMP_CLAUSE_TILE);
      if (c)
	collapse = list_length (OMP_CLAUSE_TILE_LIST (c));
      else
	{
	  c = omp_find_clause (clauses, OMP_CLAUSE_COLLAPSE);
	  if (c)
	    collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (c));
	  if (collapse != TREE_VEC_LENGTH (declv))
	    ordered = TREE_VEC_LENGTH (declv);
	}
    }
  /* First pass: extract DECL from INIT where needed and diagnose missing
     loop parts.  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      decl = TREE_VEC_ELT (declv, i);
      init = TREE_VEC_ELT (initv, i);
      cond = TREE_VEC_ELT (condv, i);
      incr = TREE_VEC_ELT (incrv, i);
      elocus = locus;

      if (decl == NULL)
	{
	  if (init != NULL)
	    switch (TREE_CODE (init))
	      {
	      case MODIFY_EXPR:
		decl = TREE_OPERAND (init, 0);
		init = TREE_OPERAND (init, 1);
		break;
	      case MODOP_EXPR:
		if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR)
		  {
		    decl = TREE_OPERAND (init, 0);
		    init = TREE_OPERAND (init, 2);
		  }
		break;
	      default:
		break;
	      }

	  if (decl == NULL)
	    {
	      error_at (locus,
			"expected iteration declaration or initialization");
	      return NULL;
	    }
	}

      if (init && EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      if (cond == NULL)
	{
	  error_at (elocus, "missing controlling predicate");
	  return NULL;
	}

      if (incr == NULL)
	{
	  error_at (elocus, "missing increment expression");
	  return NULL;
	}

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
    }

  if (orig_inits)
    {
      bool fail = false;
      tree orig_init;
      FOR_EACH_VEC_ELT (*orig_inits, i, orig_init)
	if (orig_init
	    && !c_omp_check_loop_iv_exprs (locus, declv,
					   TREE_VEC_ELT (declv, i), orig_init,
					   NULL_TREE, cp_walk_subtrees))
	  fail = true;
      if (fail)
	return NULL;
    }

  if (dependent_omp_for_p (declv, initv, condv, incrv))
    {
      tree stmt;

      stmt = make_node (code);

      for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
	{
	  /* This is really just a place-holder.  We'll be decomposing this
	     again and going through the cp_build_modify_expr path below when
	     we instantiate the thing.  */
	  TREE_VEC_ELT (initv, i)
	    = build2 (MODIFY_EXPR, void_type_node, TREE_VEC_ELT (declv, i),
		      TREE_VEC_ELT (initv, i));
	}

      TREE_TYPE (stmt) = void_type_node;
      OMP_FOR_INIT (stmt) = initv;
      OMP_FOR_COND (stmt) = condv;
      OMP_FOR_INCR (stmt) = incrv;
      OMP_FOR_BODY (stmt) = body;
      OMP_FOR_PRE_BODY (stmt) = pre_body;
      OMP_FOR_CLAUSES (stmt) = clauses;

      SET_EXPR_LOCATION (stmt, locus);
      return add_stmt (stmt);
    }

  if (!orig_declv)
    orig_declv = copy_node (declv);

  if (processing_template_decl)
    orig_incr = make_tree_vec (TREE_VEC_LENGTH (incrv));

  /* Second pass: build the actual init/incr expressions and rewrite class
     iterators; note i is only advanced for non-class iterators (the class
     case rewrites in place and continues).  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); )
    {
      decl = TREE_VEC_ELT (declv, i);
      init = TREE_VEC_ELT (initv, i);
      cond = TREE_VEC_ELT (condv, i);
      incr = TREE_VEC_ELT (incrv, i);
      if (orig_incr)
	TREE_VEC_ELT (orig_incr, i) = incr;
      elocus = locus;

      if (init && EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      if (!DECL_P (decl))
	{
	  error_at (elocus, "expected iteration declaration or "
		    "initialization");
	  return NULL;
	}

      if (incr && TREE_CODE (incr) == MODOP_EXPR)
	{
	  if (orig_incr)
	    TREE_VEC_ELT (orig_incr, i) = incr;
	  incr = cp_build_modify_expr (elocus, TREE_OPERAND (incr, 0),
				       TREE_CODE (TREE_OPERAND (incr, 1)),
				       TREE_OPERAND (incr, 2),
				       tf_warning_or_error);
	}

      if (CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  if (code == OMP_SIMD)
	    {
	      error_at (elocus, "%<#pragma omp simd%> used with class "
			"iteration variable %qE", decl);
	      return NULL;
	    }
	  if (handle_omp_for_class_iterator (i, locus, code, declv, orig_declv,
					     initv, condv, incrv, &body,
					     &pre_body, clauses, &last,
					     collapse, ordered))
	    return NULL;
	  continue;
	}

      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && !TYPE_PTR_P (TREE_TYPE (decl)))
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  return NULL;
	}

      if (!processing_template_decl)
	{
	  init = fold_build_cleanup_point_expr (TREE_TYPE (init), init);
	  init = cp_build_modify_expr (elocus, decl, NOP_EXPR, init,
				       tf_warning_or_error);
	}
      else
	init = build2 (MODIFY_EXPR, void_type_node, decl, init);
      if (cond
	  && TREE_SIDE_EFFECTS (cond)
	  && COMPARISON_CLASS_P (cond)
	  && !processing_template_decl)
	{
	  /* Wrap side-effecting comparison operands (other than the
	     iteration variable itself) in cleanup points.  */
	  tree t = TREE_OPERAND (cond, 0);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (cond, 0)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);

	  t = TREE_OPERAND (cond, 1);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (cond, 1)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	}
      if (decl == error_mark_node || init == error_mark_node)
	return NULL;

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;
      i++;
    }

  if (IS_EMPTY_STMT (pre_body))
    pre_body = NULL;

  omp_for = c_finish_omp_for (locus, code, declv, orig_declv, initv, condv,
			      incrv, body, pre_body);

  /* Check for iterators appearing in lb, b or incr expressions.  */
  if (omp_for && !c_omp_check_loop_iv (omp_for, orig_declv, cp_walk_subtrees))
    omp_for = NULL_TREE;

  if (omp_for == NULL)
    {
      return NULL;
    }

  add_stmt (omp_for);

  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)); i++)
    {
      decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), i), 0);
      incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i);

      if (TREE_CODE (incr) != MODIFY_EXPR)
	continue;

      if (TREE_SIDE_EFFECTS (TREE_OPERAND (incr, 1))
	  && BINARY_CLASS_P (TREE_OPERAND (incr, 1))
	  && !processing_template_decl)
	{
	  /* As for the condition above, wrap side-effecting increment
	     operands in cleanup points.  */
	  tree t = TREE_OPERAND (TREE_OPERAND (incr, 1), 0);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (TREE_OPERAND (incr, 1), 0)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);

	  t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (TREE_OPERAND (incr, 1), 1)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	}

      if (orig_incr)
	TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i) = TREE_VEC_ELT (orig_incr, i);
    }
  OMP_FOR_CLAUSES (omp_for) = clauses;

  /* For simd loops with non-static data member iterators, we could have added
     OMP_CLAUSE_LINEAR clauses without OMP_CLAUSE_LINEAR_STEP.  As we know the
     step at this point, fill it in.  */
  if (code == OMP_SIMD && !processing_template_decl
      && TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)) == 1)
    for (tree c = omp_find_clause (clauses, OMP_CLAUSE_LINEAR); c;
	 c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE_LINEAR))
      if (OMP_CLAUSE_LINEAR_STEP (c) == NULL_TREE)
	{
	  decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0), 0);
	  gcc_assert (decl == OMP_CLAUSE_DECL (c));
	  incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0);
	  tree step, stept;
	  switch (TREE_CODE (incr))
	    {
	    case PREINCREMENT_EXPR:
	    case POSTINCREMENT_EXPR:
	      /* c_omp_for_incr_canonicalize_ptr() should have been
		 called to massage things appropriately.  */
	      gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
	      OMP_CLAUSE_LINEAR_STEP (c) = build_int_cst (TREE_TYPE (decl), 1);
	      break;
	    case PREDECREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	      /* c_omp_for_incr_canonicalize_ptr() should have been
		 called to massage things appropriately.  */
	      gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
	      OMP_CLAUSE_LINEAR_STEP (c)
		= build_int_cst (TREE_TYPE (decl), -1);
	      break;
	    case MODIFY_EXPR:
	      gcc_assert (TREE_OPERAND (incr, 0) == decl);
	      incr = TREE_OPERAND (incr, 1);
	      switch (TREE_CODE (incr))
		{
		case PLUS_EXPR:
		  if (TREE_OPERAND (incr, 1) == decl)
		    step = TREE_OPERAND (incr, 0);
		  else
		    step = TREE_OPERAND (incr, 1);
		  break;
		case MINUS_EXPR:
		case POINTER_PLUS_EXPR:
		  gcc_assert (TREE_OPERAND (incr, 0) == decl);
		  step = TREE_OPERAND (incr, 1);
		  break;
		default:
		  gcc_unreachable ();
		}
	      stept = TREE_TYPE (decl);
	      if (POINTER_TYPE_P (stept))
		stept = sizetype;
	      step = fold_convert (stept, step);
	      if (TREE_CODE (incr) == MINUS_EXPR)
		step = fold_build1 (NEGATE_EXPR, stept, step);
	      OMP_CLAUSE_LINEAR_STEP (c) = step;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

  return omp_for;
}

/* Finish a %<#pragma omp atomic%> (or atomic read/capture) statement.
   CODE is the construct kind, OPCODE the update operation, LHS/RHS the
   memory operand and update value, V/LHS1/RHS1 the additional operands
   of the read/capture forms, SEQ_CST the seq_cst clause presence.  */

void
finish_omp_atomic (enum tree_code code, enum tree_code opcode, tree lhs,
		   tree rhs, tree v, tree lhs1, tree rhs1, bool seq_cst)
{
  tree orig_lhs;
  tree orig_rhs;
  tree orig_v;
  tree orig_lhs1;
  tree orig_rhs1;
  bool dependent_p;
  tree stmt;

  orig_lhs = lhs;
  orig_rhs = rhs;
  orig_v = v;
  orig_lhs1 = lhs1;
  orig_rhs1 = rhs1;
  dependent_p = false;
  stmt = NULL_TREE;

  /* Even in a template, we can detect invalid uses of the atomic
     pragma if neither LHS nor RHS is type-dependent.  */
  if (processing_template_decl)
    {
      dependent_p = (type_dependent_expression_p (lhs)
		     || (rhs && type_dependent_expression_p (rhs))
		     || (v && type_dependent_expression_p (v))
		     || (lhs1 && type_dependent_expression_p (lhs1))
		     || (rhs1 && type_dependent_expression_p (rhs1)));
      if (!dependent_p)
	{
	  lhs = build_non_dependent_expr (lhs);
	  if (rhs)
	    rhs = build_non_dependent_expr (rhs);
	  if (v)
	    v = build_non_dependent_expr (v);
	  if (lhs1)
	    lhs1 = build_non_dependent_expr (lhs1);
	  if (rhs1)
	    rhs1 = build_non_dependent_expr (rhs1);
	}
    }
  if (!dependent_p)
    {
      /* All the memory references must name the same location.  */
      bool swapped = false;
      if (rhs1 && cp_tree_equal (lhs, rhs))
	{
	  std::swap (rhs, rhs1);
	  swapped = !commutative_tree_code (opcode);
	}
      if (rhs1 && !cp_tree_equal (lhs, rhs1))
	{
	  if (code == OMP_ATOMIC)
	    error ("%<#pragma omp atomic update%> uses two different "
		   "expressions for memory");
	  else
	    error ("%<#pragma omp atomic capture%> uses two different "
		   "expressions for memory");
	  return;
	}
      if (lhs1 && !cp_tree_equal (lhs, lhs1))
	{
	  if (code == OMP_ATOMIC)
	    error ("%<#pragma omp atomic update%> uses two different "
		   "expressions for memory");
	  else
	    error ("%<#pragma omp atomic capture%> uses two different "
		   "expressions for memory");
	  return;
	}
      stmt = c_finish_omp_atomic (input_location, code, opcode, lhs, rhs,
				  v, lhs1, rhs1, swapped, seq_cst,
				  processing_template_decl != 0);
      if (stmt == error_mark_node)
	return;
    }
  if (processing_template_decl)
    {
      /* In a template, rebuild a representation from the original
	 operands for later instantiation.  */
      if (code == OMP_ATOMIC_READ)
	{
	  stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs),
				   OMP_ATOMIC_READ, orig_lhs);
	  OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
	  stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt);
	}
      else
	{
	  if (opcode == NOP_EXPR)
	    stmt = build2 (MODIFY_EXPR, void_type_node, orig_lhs, orig_rhs);
	  else
	    stmt = build2 (opcode, void_type_node, orig_lhs, orig_rhs);
	  if (orig_rhs1)
	    stmt = build_min_nt_loc (EXPR_LOCATION (orig_rhs1),
				     COMPOUND_EXPR, orig_rhs1, stmt);
	  if (code != OMP_ATOMIC)
	    {
	      stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs1),
				       code, orig_lhs1, stmt);
	      OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
	      stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt);
	    }
	}
      stmt = build2 (OMP_ATOMIC, void_type_node, integer_zero_node, stmt);
      OMP_ATOMIC_SEQ_CST (stmt) = seq_cst;
    }
  finish_expr_stmt (stmt);
}

/* Emit a call to the GOMP_barrier builtin.  */

void
finish_omp_barrier (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

/* Emit a call to the __sync_synchronize builtin for %<#pragma omp
   flush%>.  */

void
finish_omp_flush (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

/* Emit a call to the GOMP_taskwait builtin.  */

void
finish_omp_taskwait (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

/* Emit a call to the GOMP_taskyield builtin.  */

void
finish_omp_taskyield (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

/* Finish %<#pragma omp cancel%>: emit GOMP_cancel (MASK, IF-EXPR), where
   MASK encodes which of the parallel/for/sections/taskgroup clauses was
   given (exactly one is required).  */

void
finish_omp_cancel (tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
  int mask = 0;
  if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error ("%<#pragma omp cancel%> must specify one of "
	     "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses");
      return;
    }
  vec<tree, va_gc> *vec = make_tree_vector ();
  tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
  if (ifc != NULL_TREE)
    {
      /* Normalize the if clause expression to a boolean.  */
      tree type = TREE_TYPE (OMP_CLAUSE_IF_EXPR (ifc));
      ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR,
			     boolean_type_node, OMP_CLAUSE_IF_EXPR (ifc),
			     build_zero_cst (type));
    }
  else
    ifc = boolean_true_node;
  vec->quick_push (build_int_cst (integer_type_node, mask));
  vec->quick_push (ifc);
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

/* Finish %<#pragma omp cancellation point%>: emit
   GOMP_cancellation_point (MASK), with MASK encoded as for
   finish_omp_cancel.  */

void
finish_omp_cancellation_point (tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT);
  int mask = 0;
  if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error ("%<#pragma omp cancellation point%> must specify one of "
	     "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses");
      return;
    }
  vec<tree, va_gc> *vec
    = make_tree_vector_single (build_int_cst (integer_type_node, mask));
  tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
  release_tree_vector (vec);
  finish_expr_stmt (stmt);
}

/* Begin a __transaction_atomic or __transaction_relaxed statement.
   If PCOMPOUND is non-null, this is for a function-transaction-block, and we
   should create an extra compound stmt.  */

tree
begin_transaction_stmt (location_t loc, tree *pcompound, int flags)
{
  tree r;

  if (pcompound)
    *pcompound = begin_compound_stmt (0);

  r = build_stmt (loc, TRANSACTION_EXPR, NULL_TREE);

  /* Only add the statement to the function if support enabled.  */
  if (flag_tm)
    add_stmt (r);
  else
    error_at (loc, ((flags & TM_STMT_ATTR_RELAXED) != 0
		    ? G_("%<__transaction_relaxed%> without "
			 "transactional memory support enabled")
		    : G_("%<__transaction_atomic%> without "
			 "transactional memory support enabled")));

  TRANSACTION_EXPR_BODY (r) = push_stmt_list ();
  TREE_SIDE_EFFECTS (r) = 1;
  return r;
}

/* End a __transaction_atomic or __transaction_relaxed statement.
If COMPOUND_STMT is non-null, this is for a function-transaction-block, and we should end the compound. If NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as condition. */ void finish_transaction_stmt (tree stmt, tree compound_stmt, int flags, tree noex) { TRANSACTION_EXPR_BODY (stmt) = pop_stmt_list (TRANSACTION_EXPR_BODY (stmt)); TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0; TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0; TRANSACTION_EXPR_IS_STMT (stmt) = 1; /* noexcept specifications are not allowed for function transactions. */ gcc_assert (!(noex && compound_stmt)); if (noex) { tree body = build_must_not_throw_expr (TRANSACTION_EXPR_BODY (stmt), noex); protected_set_expr_location (body, EXPR_LOCATION (TRANSACTION_EXPR_BODY (stmt))); TREE_SIDE_EFFECTS (body) = 1; TRANSACTION_EXPR_BODY (stmt) = body; } if (compound_stmt) finish_compound_stmt (compound_stmt); } /* Build a __transaction_atomic or __transaction_relaxed expression. If NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as condition. */ tree build_transaction_expr (location_t loc, tree expr, int flags, tree noex) { tree ret; if (noex) { expr = build_must_not_throw_expr (expr, noex); protected_set_expr_location (expr, loc); TREE_SIDE_EFFECTS (expr) = 1; } ret = build1 (TRANSACTION_EXPR, TREE_TYPE (expr), expr); if (flags & TM_STMT_ATTR_RELAXED) TRANSACTION_EXPR_RELAXED (ret) = 1; TREE_SIDE_EFFECTS (ret) = 1; SET_EXPR_LOCATION (ret, loc); return ret; } void init_cp_semantics (void) { } /* Build a STATIC_ASSERT for a static assertion with the condition CONDITION and the message text MESSAGE. LOCATION is the location of the static assertion in the source code. When MEMBER_P, this static assertion is a member of a class. 
*/

void
finish_static_assert (tree condition, tree message, location_t location,
                      bool member_p)
{
  tsubst_flags_t complain = tf_warning_or_error;

  /* Bail out silently on ill-formed operands; they have already been
     diagnosed by the time we get here.  */
  if (message == NULL_TREE
      || message == error_mark_node
      || condition == NULL_TREE
      || condition == error_mark_node)
    return;

  if (check_for_bare_parameter_packs (condition))
    condition = error_mark_node;

  if (instantiation_dependent_expression_p (condition))
    {
      /* We're in a template; build a STATIC_ASSERT and put it in
	 the right place. */
      tree assertion;

      assertion = make_node (STATIC_ASSERT);
      STATIC_ASSERT_CONDITION (assertion) = condition;
      STATIC_ASSERT_MESSAGE (assertion) = message;
      STATIC_ASSERT_SOURCE_LOCATION (assertion) = location;

      if (member_p)
	maybe_add_class_template_decl_list (current_class_type,
					    assertion,
					    /*friend_p=*/0);
      else
	add_stmt (assertion);

      return;
    }

  /* Fold the expression and convert it to a boolean value. */
  condition = perform_implicit_conversion_flags (boolean_type_node, condition,
						 complain, LOOKUP_NORMAL);
  condition = fold_non_dependent_expr (condition);

  if (TREE_CODE (condition) == INTEGER_CST && !integer_zerop (condition))
    /* Do nothing; the condition is satisfied. */
    ;
  else
    {
      /* Point diagnostics at the assertion itself, not at whatever
	 input_location happens to be; restore it afterwards.  */
      location_t saved_loc = input_location;

      input_location = location;
      if (TREE_CODE (condition) == INTEGER_CST
	  && integer_zerop (condition))
	{
	  /* Compute the message length in characters: TREE_STRING_LENGTH
	     counts bytes including the terminating NUL, so divide by the
	     character size and drop the terminator.  */
	  int sz = TREE_INT_CST_LOW (TYPE_SIZE_UNIT
				     (TREE_TYPE (TREE_TYPE (message))));
	  int len = TREE_STRING_LENGTH (message) / sz - 1;

	  /* Report the error. */
	  if (len == 0)
	    error ("static assertion failed");
	  else
	    error ("static assertion failed: %s",
		   TREE_STRING_POINTER (message));
	}
      else if (condition && condition != error_mark_node)
	{
	  error ("non-constant condition for static assertion");
	  /* Re-evaluate as a constant expression purely to emit the
	     explanatory notes about why it is non-constant.  */
	  if (require_rvalue_constant_expression (condition))
	    cxx_constant_value (condition);
	}
      input_location = saved_loc;
    }
}

/* Implements the C++0x decltype keyword. Returns the type of EXPR,
   suitable for use as a type-specifier.
ID_EXPRESSION_OR_MEMBER_ACCESS_P is true when EXPR was parsed as an id-expression or a class member access, FALSE when it was parsed as a full expression. */ tree finish_decltype_type (tree expr, bool id_expression_or_member_access_p, tsubst_flags_t complain) { tree type = NULL_TREE; if (!expr || error_operand_p (expr)) return error_mark_node; if (TYPE_P (expr) || TREE_CODE (expr) == TYPE_DECL || (TREE_CODE (expr) == BIT_NOT_EXPR && TYPE_P (TREE_OPERAND (expr, 0)))) { if (complain & tf_error) error ("argument to decltype must be an expression"); return error_mark_node; } /* Depending on the resolution of DR 1172, we may later need to distinguish instantiation-dependent but not type-dependent expressions so that, say, A<decltype(sizeof(T))>::U doesn't require 'typename'. */ if (instantiation_dependent_uneval_expression_p (expr)) { type = cxx_make_type (DECLTYPE_TYPE); DECLTYPE_TYPE_EXPR (type) = expr; DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (type) = id_expression_or_member_access_p; SET_TYPE_STRUCTURAL_EQUALITY (type); return type; } /* The type denoted by decltype(e) is defined as follows: */ expr = resolve_nondeduced_context (expr, complain); if (invalid_nonstatic_memfn_p (input_location, expr, complain)) return error_mark_node; if (type_unknown_p (expr)) { if (complain & tf_error) error ("decltype cannot resolve address of overloaded function"); return error_mark_node; } /* To get the size of a static data member declared as an array of unknown bound, we need to instantiate it. */ if (VAR_P (expr) && VAR_HAD_UNKNOWN_BOUND (expr) && DECL_TEMPLATE_INSTANTIATION (expr)) instantiate_decl (expr, /*defer_ok*/true, /*expl_inst_mem*/false); if (id_expression_or_member_access_p) { /* If e is an id-expression or a class member access (5.2.5 [expr.ref]), decltype(e) is defined as the type of the entity named by e. If there is no such entity, or e names a set of overloaded functions, the program is ill-formed. 
*/ if (identifier_p (expr)) expr = lookup_name (expr); if (INDIRECT_REF_P (expr)) /* This can happen when the expression is, e.g., "a.b". Just look at the underlying operand. */ expr = TREE_OPERAND (expr, 0); if (TREE_CODE (expr) == OFFSET_REF || TREE_CODE (expr) == MEMBER_REF || TREE_CODE (expr) == SCOPE_REF) /* We're only interested in the field itself. If it is a BASELINK, we will need to see through it in the next step. */ expr = TREE_OPERAND (expr, 1); if (BASELINK_P (expr)) /* See through BASELINK nodes to the underlying function. */ expr = BASELINK_FUNCTIONS (expr); /* decltype of a decomposition name drops references in the tuple case (unlike decltype of a normal variable) and keeps cv-qualifiers from the containing object in the other cases (unlike decltype of a member access expression). */ if (DECL_DECOMPOSITION_P (expr)) { if (DECL_HAS_VALUE_EXPR_P (expr)) /* Expr is an array or struct subobject proxy, handle bit-fields properly. */ return unlowered_expr_type (expr); else /* Expr is a reference variable for the tuple case. */ return lookup_decomp_type (expr); } switch (TREE_CODE (expr)) { case FIELD_DECL: if (DECL_BIT_FIELD_TYPE (expr)) { type = DECL_BIT_FIELD_TYPE (expr); break; } /* Fall through for fields that aren't bitfields. */ gcc_fallthrough (); case FUNCTION_DECL: case VAR_DECL: case CONST_DECL: case PARM_DECL: case RESULT_DECL: case TEMPLATE_PARM_INDEX: expr = mark_type_use (expr); type = TREE_TYPE (expr); break; case ERROR_MARK: type = error_mark_node; break; case COMPONENT_REF: case COMPOUND_EXPR: mark_type_use (expr); type = is_bitfield_expr_with_lowered_type (expr); if (!type) type = TREE_TYPE (TREE_OPERAND (expr, 1)); break; case BIT_FIELD_REF: gcc_unreachable (); case INTEGER_CST: case PTRMEM_CST: /* We can get here when the id-expression refers to an enumerator or non-type template parameter. */ type = TREE_TYPE (expr); break; default: /* Handle instantiated template non-type arguments. 
*/ type = TREE_TYPE (expr); break; } } else { /* Within a lambda-expression: Every occurrence of decltype((x)) where x is a possibly parenthesized id-expression that names an entity of automatic storage duration is treated as if x were transformed into an access to a corresponding data member of the closure type that would have been declared if x were a use of the denoted entity. */ if (outer_automatic_var_p (expr) && current_function_decl && LAMBDA_FUNCTION_P (current_function_decl)) type = capture_decltype (expr); else if (error_operand_p (expr)) type = error_mark_node; else if (expr == current_class_ptr) /* If the expression is just "this", we want the cv-unqualified pointer for the "this" type. */ type = TYPE_MAIN_VARIANT (TREE_TYPE (expr)); else { /* Otherwise, where T is the type of e, if e is an lvalue, decltype(e) is defined as T&; if an xvalue, T&&; otherwise, T. */ cp_lvalue_kind clk = lvalue_kind (expr); type = unlowered_expr_type (expr); gcc_assert (TREE_CODE (type) != REFERENCE_TYPE); /* For vector types, pick a non-opaque variant. */ if (VECTOR_TYPE_P (type)) type = strip_typedefs (type); if (clk != clk_none && !(clk & clk_class)) type = cp_build_reference_type (type, (clk & clk_rvalueref)); } } return type; } /* Called from trait_expr_value to evaluate either __has_nothrow_assign or __has_nothrow_copy, depending on assign_p. Returns true iff all the copy {ctor,assign} fns are nothrow. */ static bool classtype_has_nothrow_assign_or_copy_p (tree type, bool assign_p) { tree fns = NULL_TREE; if (assign_p || TYPE_HAS_COPY_CTOR (type)) fns = get_class_binding (type, assign_p ? assign_op_identifier : ctor_identifier); bool saw_copy = false; for (ovl_iterator iter (fns); iter; ++iter) { tree fn = *iter; if (copy_fn_p (fn) > 0) { saw_copy = true; maybe_instantiate_noexcept (fn); if (!TYPE_NOTHROW_P (TREE_TYPE (fn))) return false; } } return saw_copy; } /* Actually evaluates the trait. 
*/

/* Returns the truth value of the trait KIND applied to TYPE1 (and, for
   the binary traits, TYPE2).  Callers have already rejected dependent
   operands, so the predicates below can be evaluated directly.  */

static bool
trait_expr_value (cp_trait_kind kind, tree type1, tree type2)
{
  enum tree_code type_code1;
  tree t;

  type_code1 = TREE_CODE (type1);

  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
	      && (trait_expr_value (CPTK_HAS_TRIVIAL_ASSIGN, type1, type2)
		  || (CLASS_TYPE_P (type1)
		      && classtype_has_nothrow_assign_or_copy_p (type1,
								 true))));

    case CPTK_HAS_TRIVIAL_ASSIGN:
      /* ??? The standard seems to be missing the "or array of such a class
	 type" wording for this trait.  */
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
	      && (trivial_type_p (type1)
		  || (CLASS_TYPE_P (type1)
		      && TYPE_HAS_TRIVIAL_COPY_ASSIGN (type1))));

    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      /* Comma expression: instantiate the noexcept-spec of the located
	 default constructor before querying TYPE_NOTHROW_P.  */
      return (trait_expr_value (CPTK_HAS_TRIVIAL_CONSTRUCTOR, type1, type2)
	      || (CLASS_TYPE_P (type1)
		  && (t = locate_ctor (type1))
		  && (maybe_instantiate_noexcept (t),
		      TYPE_NOTHROW_P (TREE_TYPE (t)))));

    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1)
	      || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DFLT (type1)));

    case CPTK_HAS_NOTHROW_COPY:
      type1 = strip_array_types (type1);
      return (trait_expr_value (CPTK_HAS_TRIVIAL_COPY, type1, type2)
	      || (CLASS_TYPE_P (type1)
		  && classtype_has_nothrow_assign_or_copy_p (type1, false)));

    case CPTK_HAS_TRIVIAL_COPY:
      /* ??? The standard seems to be missing the "or array of such a class
	 type" wording for this trait.  */
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
	      || (CLASS_TYPE_P (type1)
		  && TYPE_HAS_TRIVIAL_COPY_CTOR (type1)));

    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
	      || (CLASS_TYPE_P (type1)
		  && TYPE_HAS_TRIVIAL_DESTRUCTOR (type1)));

    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
      return type_has_virtual_destructor (type1);

    case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS:
      return type_has_unique_obj_representations (type1);

    case CPTK_IS_ABSTRACT:
      return ABSTRACT_CLASS_TYPE_P (type1);

    case CPTK_IS_AGGREGATE:
      return CP_AGGREGATE_TYPE_P (type1);

    case CPTK_IS_BASE_OF:
      /* A class is considered a base of itself (modulo cv-qualifiers).  */
      return (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
	      && (same_type_ignoring_top_level_qualifiers_p (type1, type2)
		  || DERIVED_FROM_P (type1, type2)));

    case CPTK_IS_CLASS:
      return NON_UNION_CLASS_TYPE_P (type1);

    case CPTK_IS_EMPTY:
      return NON_UNION_CLASS_TYPE_P (type1) && CLASSTYPE_EMPTY_P (type1);

    case CPTK_IS_ENUM:
      return type_code1 == ENUMERAL_TYPE;

    case CPTK_IS_FINAL:
      return CLASS_TYPE_P (type1) && CLASSTYPE_FINAL (type1);

    case CPTK_IS_LITERAL_TYPE:
      return literal_type_p (type1);

    case CPTK_IS_POD:
      return pod_type_p (type1);

    case CPTK_IS_POLYMORPHIC:
      return CLASS_TYPE_P (type1) && TYPE_POLYMORPHIC_P (type1);

    case CPTK_IS_SAME_AS:
      return same_type_p (type1, type2);

    case CPTK_IS_STD_LAYOUT:
      return std_layout_type_p (type1);

    case CPTK_IS_TRIVIAL:
      return trivial_type_p (type1);

    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
      return is_trivially_xible (MODIFY_EXPR, type1, type2);

    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      return is_trivially_xible (INIT_EXPR, type1, type2);

    case CPTK_IS_TRIVIALLY_COPYABLE:
      return trivially_copyable_p (type1);

    case CPTK_IS_UNION:
      return type_code1 == UNION_TYPE;

    case CPTK_IS_ASSIGNABLE:
      return is_xible (MODIFY_EXPR, type1, type2);

    case CPTK_IS_CONSTRUCTIBLE:
      return is_xible (INIT_EXPR, type1, type2);

    default:
      gcc_unreachable ();
      return false;
    }
}

/* If TYPE is an
array of unknown bound, or (possibly cv-qualified) void, or a complete type, returns true, otherwise false. */ static bool check_trait_type (tree type) { if (type == NULL_TREE) return true; if (TREE_CODE (type) == TREE_LIST) return (check_trait_type (TREE_VALUE (type)) && check_trait_type (TREE_CHAIN (type))); if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type) && COMPLETE_TYPE_P (TREE_TYPE (type))) return true; if (VOID_TYPE_P (type)) return true; return !!complete_type_or_else (strip_array_types (type), NULL_TREE); } /* Process a trait expression. */ tree finish_trait_expr (cp_trait_kind kind, tree type1, tree type2) { if (type1 == error_mark_node || type2 == error_mark_node) return error_mark_node; if (processing_template_decl) { tree trait_expr = make_node (TRAIT_EXPR); TREE_TYPE (trait_expr) = boolean_type_node; TRAIT_EXPR_TYPE1 (trait_expr) = type1; TRAIT_EXPR_TYPE2 (trait_expr) = type2; TRAIT_EXPR_KIND (trait_expr) = kind; return trait_expr; } switch (kind) { case CPTK_HAS_NOTHROW_ASSIGN: case CPTK_HAS_TRIVIAL_ASSIGN: case CPTK_HAS_NOTHROW_CONSTRUCTOR: case CPTK_HAS_TRIVIAL_CONSTRUCTOR: case CPTK_HAS_NOTHROW_COPY: case CPTK_HAS_TRIVIAL_COPY: case CPTK_HAS_TRIVIAL_DESTRUCTOR: case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS: case CPTK_HAS_VIRTUAL_DESTRUCTOR: case CPTK_IS_ABSTRACT: case CPTK_IS_AGGREGATE: case CPTK_IS_EMPTY: case CPTK_IS_FINAL: case CPTK_IS_LITERAL_TYPE: case CPTK_IS_POD: case CPTK_IS_POLYMORPHIC: case CPTK_IS_STD_LAYOUT: case CPTK_IS_TRIVIAL: case CPTK_IS_TRIVIALLY_COPYABLE: if (!check_trait_type (type1)) return error_mark_node; break; case CPTK_IS_ASSIGNABLE: case CPTK_IS_CONSTRUCTIBLE: break; case CPTK_IS_TRIVIALLY_ASSIGNABLE: case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE: if (!check_trait_type (type1) || !check_trait_type (type2)) return error_mark_node; break; case CPTK_IS_BASE_OF: if (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2) && !same_type_ignoring_top_level_qualifiers_p (type1, type2) && !complete_type_or_else (type2, 
NULL_TREE)) /* We already issued an error. */ return error_mark_node; break; case CPTK_IS_CLASS: case CPTK_IS_ENUM: case CPTK_IS_UNION: case CPTK_IS_SAME_AS: break; default: gcc_unreachable (); } return (trait_expr_value (kind, type1, type2) ? boolean_true_node : boolean_false_node); } /* Do-nothing variants of functions to handle pragma FLOAT_CONST_DECIMAL64, which is ignored for C++. */ void set_float_const_decimal64 (void) { } void clear_float_const_decimal64 (void) { } bool float_const_decimal64_p (void) { return 0; } /* Return true if T designates the implied `this' parameter. */ bool is_this_parameter (tree t) { if (!DECL_P (t) || DECL_NAME (t) != this_identifier) return false; gcc_assert (TREE_CODE (t) == PARM_DECL || is_capture_proxy (t) || (cp_binding_oracle && TREE_CODE (t) == VAR_DECL)); return true; } /* Insert the deduced return type for an auto function. */ void apply_deduced_return_type (tree fco, tree return_type) { tree result; if (return_type == error_mark_node) return; if (DECL_CONV_FN_P (fco)) DECL_NAME (fco) = make_conv_op_name (return_type); TREE_TYPE (fco) = change_return_type (return_type, TREE_TYPE (fco)); result = DECL_RESULT (fco); if (result == NULL_TREE) return; if (TREE_TYPE (result) == return_type) return; if (!processing_template_decl && !VOID_TYPE_P (return_type) && !complete_type_or_else (return_type, NULL_TREE)) return; /* We already have a DECL_RESULT from start_preparsed_function. Now we need to redo the work it and allocate_struct_function did to reflect the new type. 
*/ gcc_assert (current_function_decl == fco); result = build_decl (input_location, RESULT_DECL, NULL_TREE, TYPE_MAIN_VARIANT (return_type)); DECL_ARTIFICIAL (result) = 1; DECL_IGNORED_P (result) = 1; cp_apply_type_quals_to_decl (cp_type_quals (return_type), result); DECL_RESULT (fco) = result; if (!processing_template_decl) { bool aggr = aggregate_value_p (result, fco); #ifdef PCC_STATIC_STRUCT_RETURN cfun->returns_pcc_struct = aggr; #endif cfun->returns_struct = aggr; } } /* DECL is a local variable or parameter from the surrounding scope of a lambda-expression. Returns the decltype for a use of the capture field for DECL even if it hasn't been captured yet. */ static tree capture_decltype (tree decl) { tree lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (current_function_decl)); /* FIXME do lookup instead of list walk? */ tree cap = value_member (decl, LAMBDA_EXPR_CAPTURE_LIST (lam)); tree type; if (cap) type = TREE_TYPE (TREE_PURPOSE (cap)); else switch (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam)) { case CPLD_NONE: error ("%qD is not captured", decl); return error_mark_node; case CPLD_COPY: type = TREE_TYPE (decl); if (TREE_CODE (type) == REFERENCE_TYPE && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE) type = TREE_TYPE (type); break; case CPLD_REFERENCE: type = TREE_TYPE (decl); if (TREE_CODE (type) != REFERENCE_TYPE) type = build_reference_type (TREE_TYPE (decl)); break; default: gcc_unreachable (); } if (TREE_CODE (type) != REFERENCE_TYPE) { if (!LAMBDA_EXPR_MUTABLE_P (lam)) type = cp_build_qualified_type (type, (cp_type_quals (type) |TYPE_QUAL_CONST)); type = build_reference_type (type); } return type; } /* Build a unary fold expression of EXPR over OP. If IS_RIGHT is true, this is a right unary fold. Otherwise it is a left unary fold. */ static tree finish_unary_fold_expr (tree expr, int op, tree_code dir) { // Build a pack expansion (assuming expr has pack type). 
if (!uses_parameter_packs (expr)) { error_at (location_of (expr), "operand of fold expression has no " "unexpanded parameter packs"); return error_mark_node; } tree pack = make_pack_expansion (expr); // Build the fold expression. tree code = build_int_cstu (integer_type_node, abs (op)); tree fold = build_min_nt_loc (UNKNOWN_LOCATION, dir, code, pack); FOLD_EXPR_MODIFY_P (fold) = (op < 0); return fold; } tree finish_left_unary_fold_expr (tree expr, int op) { return finish_unary_fold_expr (expr, op, UNARY_LEFT_FOLD_EXPR); } tree finish_right_unary_fold_expr (tree expr, int op) { return finish_unary_fold_expr (expr, op, UNARY_RIGHT_FOLD_EXPR); } /* Build a binary fold expression over EXPR1 and EXPR2. The associativity of the fold is determined by EXPR1 and EXPR2 (whichever has an unexpanded parameter pack). */ tree finish_binary_fold_expr (tree pack, tree init, int op, tree_code dir) { pack = make_pack_expansion (pack); tree code = build_int_cstu (integer_type_node, abs (op)); tree fold = build_min_nt_loc (UNKNOWN_LOCATION, dir, code, pack, init); FOLD_EXPR_MODIFY_P (fold) = (op < 0); return fold; } tree finish_binary_fold_expr (tree expr1, tree expr2, int op) { // Determine which expr has an unexpanded parameter pack and // set the pack and initial term. bool pack1 = uses_parameter_packs (expr1); bool pack2 = uses_parameter_packs (expr2); if (pack1 && !pack2) return finish_binary_fold_expr (expr1, expr2, op, BINARY_RIGHT_FOLD_EXPR); else if (pack2 && !pack1) return finish_binary_fold_expr (expr2, expr1, op, BINARY_LEFT_FOLD_EXPR); else { if (pack1) error ("both arguments in binary fold have unexpanded parameter packs"); else error ("no unexpanded parameter packs in binary fold"); } return error_mark_node; } /* Finish __builtin_launder (arg). 
*/

tree
finish_builtin_launder (location_t loc, tree arg, tsubst_flags_t complain)
{
  tree orig_arg = arg;
  /* Apply array-to-pointer / function-to-pointer decay so a pointer-typed
     operand is obtained; skipped while the operand is type-dependent.  */
  if (!type_dependent_expression_p (arg))
    arg = decay_conversion (arg, complain);
  if (error_operand_p (arg))
    return error_mark_node;
  if (!type_dependent_expression_p (arg)
      && TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
    {
      error_at (loc, "non-pointer argument to %<__builtin_launder%>");
      return error_mark_node;
    }
  /* In a template, keep the operand in its original (undecayed) form so it
     can be reprocessed at instantiation time.  */
  if (processing_template_decl)
    arg = orig_arg;
  return build_call_expr_internal_loc (loc, IFN_LAUNDER, TREE_TYPE (arg),
				       1, arg);
}

#include "gt-cp-semantics.h"
// linear_master_slave_constraint.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Aditya Ghantasala // #if !defined(LINEAR_MASTER_SLAVE_CONSTRAINT_H) #define LINEAR_MASTER_SLAVE_CONSTRAINT_H // System includes // External includes // Project includes #include "includes/define.h" #include "includes/master_slave_constraint.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class LinearMasterSlaveConstraint * @ingroup KratosCore * @brief This class allows to add a master-slave constraint which is of the form * SlaveDofVector = T * MasterDofVector + ConstantVector. * * or * * SlaveDof = weight * MasterDof + Constant * @details The data T and ConstantVector (or the equivalent scalars) are not stored in the base class, since they can be eventually evaluated runtime. 
* @author Aditya Ghantasala */ class LinearMasterSlaveConstraint : public MasterSlaveConstraint { public: ///@name Type Definitions ///@{ /// The definition of the base class, we take the rest of the definitions from the base class typedef MasterSlaveConstraint BaseType; /// The index type definition typedef BaseType::IndexType IndexType; /// The DoF type definition typedef BaseType::DofType DofType; /// The DoF pointer vector type definition typedef BaseType::DofPointerVectorType DofPointerVectorType; /// The node type definition typedef BaseType::NodeType NodeType; /// The equation Id vector type definition typedef BaseType::EquationIdVectorType EquationIdVectorType; /// The matrix type definition typedef BaseType::MatrixType MatrixType; /// The vector type definition typedef BaseType::VectorType VectorType; /// The variable type definition (double) typedef BaseType::VariableType VariableType; /// Pointer definition of DataValueContainer KRATOS_CLASS_POINTER_DEFINITION(LinearMasterSlaveConstraint); ///@} ///@name Enum's ///@{ ///@} ///@name Life Cycle ///@{ /** * @brief The default constructor * @param IndexType The Id of the new created constraint */ explicit LinearMasterSlaveConstraint(IndexType Id = 0) : BaseType(Id) { } /** * @brief Constructor by passing a vector of Master and slave dofs and corresponding Matrix and constant vector * @param IndexType The Id of the new created constraint * @param rMasterDofsVector The vector containing the DoF of the master side * @param rSlaveDofsVector The vector containing the DoF of the slave side * @param rRelationMatrix The relation matrix between the master/slave DoF * @param rConstantVector The vector containing the additional kinematic relationship */ LinearMasterSlaveConstraint( IndexType Id, DofPointerVectorType& rMasterDofsVector, DofPointerVectorType& rSlaveDofsVector, const MatrixType& rRelationMatrix, const VectorType& rConstantVector ) : BaseType(Id), mSlaveDofsVector(rSlaveDofsVector), 
mMasterDofsVector(rMasterDofsVector), mRelationMatrix(rRelationMatrix), mConstantVector(rConstantVector) { } /** * @brief Constructor by passing a single Master and slave dofs and corresponding weight and constant for a variable component * @param IndexType The Id of the new created constraint * @param rMasterNode The node of master side * @param rMasterVariable The variable of the master DoF * @param rSlaveNode The node of slave side * @param rSlaveVariable The variable of the slave DoF * @param Weight The relation between the master/slave DoF * @param Constant The additional kinematic relationship */ LinearMasterSlaveConstraint( IndexType Id, NodeType& rMasterNode, const VariableType& rMasterVariable, NodeType& rSlaveNode, const VariableType& rSlaveVariable, const double Weight, const double Constant ) : MasterSlaveConstraint(Id) { // Resizing the memeber variables mRelationMatrix.resize(1,1,false); mConstantVector.resize(1,false); // Obtaining the dofs from the variables mSlaveDofsVector.push_back(rSlaveNode.pGetDof(rSlaveVariable)); mMasterDofsVector.push_back(rMasterNode.pGetDof(rMasterVariable)); mRelationMatrix(0,0) = Weight; mConstantVector(0) = Constant; // Setting the slave flag on the node rSlaveNode.Set(SLAVE); } /// Destructor. 
~LinearMasterSlaveConstraint() override { } /// Copy Constructor LinearMasterSlaveConstraint(const LinearMasterSlaveConstraint& rOther) : BaseType(rOther), mSlaveDofsVector(rOther.mSlaveDofsVector), mMasterDofsVector(rOther.mMasterDofsVector), mRelationMatrix(rOther.mRelationMatrix), mConstantVector(rOther.mConstantVector) { } /// Assignment operator LinearMasterSlaveConstraint& operator=(const LinearMasterSlaveConstraint& rOther) { BaseType::operator=( rOther ); mSlaveDofsVector = rOther.mSlaveDofsVector; mMasterDofsVector = rOther.mMasterDofsVector; mRelationMatrix = rOther.mRelationMatrix; mConstantVector = rOther.mConstantVector; return *this; } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method by passing a single Master and slave dofs and corresponding weight and constant for a variable component * @param IndexType The Id of the new created constraint * @param rMasterDofsVector The DoFs of master side * @param rSlaveDofsVector The DoFs of master side * @param rRelationMatrix The relation matrix between the master/slave DoF * @param rConstantVector The vector containing the additional kinematic relationship * @return A Pointer to the new constraint */ MasterSlaveConstraint::Pointer Create( IndexType Id, DofPointerVectorType& rMasterDofsVector, DofPointerVectorType& rSlaveDofsVector, const MatrixType& rRelationMatrix, const VectorType& rConstantVector ) const override { KRATOS_TRY return Kratos::make_shared<LinearMasterSlaveConstraint>(Id, rMasterDofsVector, rSlaveDofsVector, rRelationMatrix, rConstantVector); KRATOS_CATCH(""); } /** * @brief Create method by passing a single Master and slave dofs and corresponding weight and constant for a variable component * @param IndexType The Id of the new created constraint * @param rMasterNode The node of master side * @param rMasterVariable The variable of the master DoF * @param rSlaveNode The node of slave side * @param rSlaveVariable The variable of the slave DoF * @param 
Weight The relation between the master/slave DoF * @param Constant The additional kinematic relationship * @return A Pointer to the new constraint */ MasterSlaveConstraint::Pointer Create( IndexType Id, NodeType& rMasterNode, const VariableType& rMasterVariable, NodeType& rSlaveNode, const VariableType& rSlaveVariable, const double Weight, const double Constant ) const override { KRATOS_TRY return Kratos::make_shared<LinearMasterSlaveConstraint>(Id, rMasterNode, rMasterVariable, rSlaveNode, rSlaveVariable, Weight, Constant); KRATOS_CATCH(""); } /** * @brief It creates a new constraint pointer and clones the previous constraint data * @param NewId the ID of the new constraint * @return a Pointer to the new constraint */ MasterSlaveConstraint::Pointer Clone (IndexType NewId) const override { KRATOS_TRY MasterSlaveConstraint::Pointer p_new_const = Kratos::make_shared<LinearMasterSlaveConstraint>(*this); p_new_const->SetId(NewId); p_new_const->SetData(this->GetData()); p_new_const->Set(Flags(*this)); return p_new_const; KRATOS_CATCH(""); } /** * @brief Determines the constrant's slvae and master list of DOFs * @param rSlaveDofsVector The list of slave DOFs * @param rMasterDofsVector The list of slave DOFs * @param rCurrentProcessInfo The current process info instance */ void GetDofList( DofPointerVectorType& rSlaveDofsVector, DofPointerVectorType& rMasterDofsVector, const ProcessInfo& rCurrentProcessInfo ) const override { rSlaveDofsVector = mSlaveDofsVector; rMasterDofsVector = mMasterDofsVector; } /** * @brief Determines the constrant's slave and master list of DOFs * @param rSlaveDofsVector The list of slave DOFs * @param rMasterDofsVector The list of slave DOFs * @param rCurrentProcessInfo The current process info instance */ void SetDofList( const DofPointerVectorType& rSlaveDofsVector, const DofPointerVectorType& rMasterDofsVector, const ProcessInfo& rCurrentProcessInfo ) override { mSlaveDofsVector = rSlaveDofsVector; mMasterDofsVector = rMasterDofsVector; } /** 
     * @brief This determines the master equation IDs connected to this constraint
     * @param rSlaveEquationIds The vector of slave equation ids.
     * @param rMasterEquationIds The vector of master equation ids.
     * @param rCurrentProcessInfo The current process info instance
     */
    void EquationIdVector(
        EquationIdVectorType& rSlaveEquationIds,
        EquationIdVectorType& rMasterEquationIds,
        const ProcessInfo& rCurrentProcessInfo
        ) const override
    {
        // Resize only when needed so repeated assembly calls do not reallocate
        if (rSlaveEquationIds.size() != mSlaveDofsVector.size())
            rSlaveEquationIds.resize(mSlaveDofsVector.size());

        if (rMasterEquationIds.size() != mMasterDofsVector.size())
            rMasterEquationIds.resize(mMasterDofsVector.size());

        for(IndexType i=0; i<rSlaveEquationIds.size(); ++i)
            rSlaveEquationIds[i] = mSlaveDofsVector[i]->EquationId();

        for(IndexType i=0; i<rMasterEquationIds.size(); ++i)
            rMasterEquationIds[i] = mMasterDofsVector[i]->EquationId();
    }

    /**
     * @brief This method returns the slave dof vector
     * @return The vector containing the slave dofs
     */
    const DofPointerVectorType& GetSlaveDofsVector() const override
    {
        return mSlaveDofsVector;
    }

    /**
     * @brief This method sets the slave dof vector
     * @param rSlaveDofsVector The vector containing the new slave dofs
     */
    void SetSlaveDofsVector(const DofPointerVectorType& rSlaveDofsVector) override
    {
        mSlaveDofsVector = rSlaveDofsVector;
    }

    /**
     * @brief This method returns the master dof vector
     * @return The vector containing the master dofs
     */
    const DofPointerVectorType& GetMasterDofsVector() const override
    {
        return mMasterDofsVector;
    }

    /**
     * @brief This method sets the master dof vector
     * @param rMasterDofsVector The vector containing the new master dofs
     */
    void SetMasterDofsVector(const DofPointerVectorType& rMasterDofsVector) override
    {
        mMasterDofsVector = rMasterDofsVector;
    }

    /**
     * @brief This method resets (zeroes) the values of the slave dofs
     * @param rCurrentProcessInfo the current process info instance
     */
    void ResetSlaveDofs(const ProcessInfo& rCurrentProcessInfo) override
    {
        for (IndexType i = 0; i < mSlaveDofsVector.size(); ++i) {
            // Atomic update: several constraints may share a slave dof
            #pragma omp atomic
            mSlaveDofsVector[i]->GetSolutionStepValue() *= 0.0;
        }
    }

    /**
     * @brief This method directly applies the master/slave relationship
     * @details slave = RelationMatrix * master + ConstantVector (accumulated)
     * @param rCurrentProcessInfo the current process info instance
     */
    void Apply(const ProcessInfo& rCurrentProcessInfo) override
    {
        // Saving the master dofs values
        Vector master_dofs_values(mMasterDofsVector.size());

        for (IndexType i = 0; i < mMasterDofsVector.size(); ++i) {
            master_dofs_values[i] = mMasterDofsVector[i]->GetSolutionStepValue();
        }

        // Apply the constraint to the slave dofs
        for (IndexType i = 0; i < mRelationMatrix.size1(); ++i) {
            double aux = mConstantVector[i];
            for(IndexType j = 0; j < mRelationMatrix.size2(); ++j) {
                aux += mRelationMatrix(i,j) * master_dofs_values[j];
            }

            // Atomic update: several constraints may share a slave dof
            #pragma omp atomic
            mSlaveDofsVector[i]->GetSolutionStepValue() += aux;
        }
    }

    /**
     * @brief This method allows to set the local system in case it is not computed at run time (internal variable)
     * @param rRelationMatrix the matrix which relates the master and slave degrees of freedom
     * @param rConstantVector The constant vector (one entry for each slave)
     * @param rCurrentProcessInfo The current process info instance
     */
    void SetLocalSystem(
        const MatrixType& rRelationMatrix,
        const VectorType& rConstantVector,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        if (mRelationMatrix.size1() != rRelationMatrix.size1() || mRelationMatrix.size2() != rRelationMatrix.size2())
            mRelationMatrix.resize(rRelationMatrix.size1(), rRelationMatrix.size2(), false);

        noalias(mRelationMatrix) = rRelationMatrix;

        if (mConstantVector.size() != rConstantVector.size())
            mConstantVector.resize(rConstantVector.size(), false);

        noalias(mConstantVector) = rConstantVector;
    }

    /**
     * @brief This is called during the assembling process in order
     * @details To calculate the relation between the master and slave.
     * @param rRelationMatrix the matrix which relates the master and slave degrees of freedom
     * @param rConstantVector The constant vector (one entry for each slave)
     * @param rCurrentProcessInfo the current process info instance
     */
    void CalculateLocalSystem(
        MatrixType& rRelationMatrix,
        VectorType& rConstantVector,
        const ProcessInfo& rCurrentProcessInfo
        ) const override
    {
        if (rRelationMatrix.size1() != mRelationMatrix.size1() || rRelationMatrix.size2() != mRelationMatrix.size2())
            rRelationMatrix.resize(mRelationMatrix.size1(), mRelationMatrix.size2(), false);

        noalias(rRelationMatrix) = mRelationMatrix;

        if (rConstantVector.size() != mConstantVector.size())
            rConstantVector.resize(mConstantVector.size(), false);

        noalias(rConstantVector) = mConstantVector;
    }

    ///@}
    ///@name Input and output
    ///@{

    /**
     * @brief Returns the string containing a detailed description of this object.
     * @return the string with the information
     */
    std::string GetInfo() const override
    {
        return "Linear User Provided Master Slave Constraint class !";
    }

    /**
     * @brief This method prints the current Constraint Id
     * @param rOStream The buffer where the information is given
     */
    void PrintInfo(std::ostream &rOStream) const override
    {
        rOStream << " LinearMasterSlaveConstraint Id : " << this->Id() << std::endl;
        rOStream << " Number of Slaves : " << mSlaveDofsVector.size() << std::endl;
        rOStream << " Number of Masters : " << mMasterDofsVector.size() << std::endl;
    }

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    DofPointerVectorType mSlaveDofsVector;  /// The DoFs of slave side
    DofPointerVectorType mMasterDofsVector; /// The DoFs of master side
    MatrixType mRelationMatrix;             /// The relation matrix between the master/slave DoF
    VectorType mConstantVector;             /// The vector containing the additional kinematic relationship

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@name Serialization
    ///@{

    friend class Serializer;

    // Serializes base class plus the slave/master dofs and the linear relation
    void save(Serializer &rSerializer) const override
    {
        KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, MasterSlaveConstraint);
        rSerializer.save("SlaveDofVec", mSlaveDofsVector);
        rSerializer.save("MasterDofVec", mMasterDofsVector);
        rSerializer.save("RelationMat", mRelationMatrix);
        rSerializer.save("ConstantVec", mConstantVector);
    }

    void load(Serializer &rSerializer) override
    {
        KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, MasterSlaveConstraint);
        rSerializer.load("SlaveDofVec", mSlaveDofsVector);
        rSerializer.load("MasterDofVec", mMasterDofsVector);
        rSerializer.load("RelationMat", mRelationMatrix);
        rSerializer.load("ConstantVec", mConstantVector);
    }
};

///@name Input/Output functions
///@{

/// input stream function
inline std::istream& operator>>(std::istream& rIStream, LinearMasterSlaveConstraint& rThis);

/// output stream function
inline std::ostream& operator<<(std::ostream& rOStream, const LinearMasterSlaveConstraint& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    return rOStream;
}

///@}

} // namespace Kratos

#endif // USER_PROVIDED_LINEAR_MASTER_SLAVE_CONSTRAINT_H
talesf.c
/* Copyright (c) 2011-2012, Daniel S. Standage <daniel.standage@gmail.com> and Erin Doyle <edoyle@iastate.edu> with modifications by Nick Booher <njbooher@gmail.com>. See README for license details. */ // System libraries #include <getopt.h> #include <math.h> #include <omp.h> #include <stdio.h> #include <zlib.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <ctype.h> #include <stdarg.h> #include <bcutils/Hashmap.h> #include <bcutils/Array.h> #include <bcutils/bcutils.h> // Initialize the kseq library #include <bcutils/kseq.h> KSEQ_INIT(gzFile, gzread) typedef struct { int strand; char *sequence; char *sequence_name; unsigned long index; double score; } BindingSite; Hashmap *talesf_kwargs; /* * Utility */ void create_options_string(char *options_str, char *rvd_str) { char cutoff_str[32]; char rvds_eq_str[256]; double cutoff = *((double *) hashmap_get(talesf_kwargs, "cutoff")); int forward_only = *((int *) hashmap_get(talesf_kwargs, "forward_only")); int c_upstream = *((int *) hashmap_get(talesf_kwargs, "c_upstream")); strcat(options_str, "options_used:"); if (!forward_only) { strcat(options_str, "search reverse complement, "); } strcat(options_str, "upstream_base = "); if (c_upstream != 1) { strcat(options_str, "T "); } if (c_upstream != 0) { strcat(options_str, "C "); } sprintf(cutoff_str, ", cutoff = %.2lf, ", cutoff); strcat(options_str, cutoff_str); sprintf(rvds_eq_str, "rvd_sequence = %s", rvd_str); strcat(options_str, rvds_eq_str); strcat(options_str, "\n"); } double *create_lookahead_array(unsigned int *rvd_seq, unsigned int rvd_seq_len, double cutoff, double best_score, double **scoring_matrix) { double *lookahead_array = calloc(rvd_seq_len, sizeof(double)); lookahead_array[rvd_seq_len - 1] = cutoff * best_score; for (int i = rvd_seq_len - 2; i >= 0; i--) { double *scores = scoring_matrix[rvd_seq[i+1]]; double min = scores[0]; for (int j = 0; j < 4; j++) { if (scores[j] < min) { min = scores[j]; } } lookahead_array[i] = lookahead_array[i 
+ 1] - (min); } return lookahead_array; } /* * Core */ int binding_site_compare_score(const void * a, const void * b) { BindingSite *real_a = *((BindingSite **)a); BindingSite *real_b = *((BindingSite **)b); double real_a_score = floorf(real_a->score * 100 + 0.5) / 100; double real_b_score = floorf(real_b->score * 100 + 0.5) / 100; double score_diff = (real_a_score - real_b_score); if(score_diff < 0) { return -1; } else if(score_diff > 0) { return 1; } else { return 0; } } int print_results(Array *results, FILE *log_file) { char *output_filepath = hashmap_get(talesf_kwargs, "output_filepath"); char *organism_name = hashmap_get(talesf_kwargs, "organism_name"); unsigned int num_rvds = *((unsigned int *) hashmap_get(talesf_kwargs, "rvd_seq_len")); double best_score = *((double *) hashmap_get(talesf_kwargs, "best_score")); int forward_only = *((int *) hashmap_get(talesf_kwargs, "forward_only")); char *source_str = "TALESF"; char options_str[512]; // strcat doesn't seem to work unless you do this *options_str = '\0'; char *plus_strand_sequence; FILE *gff_out_file = NULL; FILE *tab_out_file = NULL; FILE *genome_browser_file = NULL; int is_genome = (*organism_name != '\0'); int genome_using_gbrowse = (is_genome && (strcmp(organism_name, "oryza_sativa") == 0 || strcmp(organism_name, "arabidopsis_thaliana") == 0)); size_t output_filepath_length; char* temp_output_filepath; char *rvd_string_printable = strdup(hashmap_get(talesf_kwargs, "rvd_string")); char *pos = strstr(rvd_string_printable, " "); while (pos != NULL) { strncpy(pos, "_", 1); pos = strstr(rvd_string_printable, " "); } create_options_string(options_str, rvd_string_printable); output_filepath_length = strlen(output_filepath) + 5; temp_output_filepath = calloc(output_filepath_length + 1, sizeof(char)); sprintf(temp_output_filepath, "%s.txt", output_filepath); tab_out_file = fopen(temp_output_filepath, "w"); memset(temp_output_filepath, '\0', output_filepath_length); sprintf(temp_output_filepath, "%s.gff3", 
output_filepath); gff_out_file = fopen(temp_output_filepath, "w"); if(is_genome) { memset(temp_output_filepath, '\0', output_filepath_length); if(genome_using_gbrowse) { sprintf(temp_output_filepath, "%s.gff", output_filepath); } else { sprintf(temp_output_filepath, "%s.bed", output_filepath); } genome_browser_file = fopen(temp_output_filepath, "w"); } free(temp_output_filepath); if(!gff_out_file || !tab_out_file || (is_genome && !genome_browser_file)) { fprintf(log_file, "Error: unable to open output file '%s'\n", output_filepath); free(rvd_string_printable); return 1; } // Tab file header if (forward_only) { fprintf(tab_out_file, "table_ignores:Plus strand sequence\n"); } fprintf(tab_out_file, options_str); fprintf(tab_out_file, "Best Possible Score:%.2lf\n", best_score); fprintf(tab_out_file, "Sequence Name\tStrand\tScore\tStart Position\tTarget Sequence\tPlus strand sequence\n"); // GFF file header fprintf(gff_out_file, "##gff-version 3\n"); if (forward_only) { fprintf(gff_out_file, "#table_display_tags:target_sequence\n"); } else { fprintf(gff_out_file, "#table_display_tags:target_sequence,plus_strand_sequence\n"); } fprintf(gff_out_file, "#%s", options_str); fprintf(gff_out_file, "#Best Possible Score:%.2lf\n", best_score); // bed file header if(genome_using_gbrowse) { fprintf(genome_browser_file, "##gff-version 3\n"); } else if(is_genome) { fprintf(genome_browser_file, "track name=\"TAL Targets\" description=\"Targets for RVD sequence %s\" visibility=2 useScore=1\n", rvd_string_printable); } for(int i = 0; i < array_size(results); i++) { BindingSite *site = (BindingSite *)array_get(results, i); char *sequence = site->sequence; char strand = '+'; char *tab_strand = "Plus"; if(site->strand > 0) plus_strand_sequence = sequence; else { int seq_len = num_rvds + 2; plus_strand_sequence = sequence; sequence = malloc(sizeof(char)*(seq_len+1)); sequence[seq_len] = '\0'; for(int j = 0; j < seq_len; j++) { char base = site->sequence[seq_len - j - 1]; if(base == 'A' || 
base == 'a') sequence[j] = 'T'; else if(base == 'C' || base == 'c') sequence[j] = 'G'; else if(base == 'G' || base == 'g') sequence[j] = 'C'; else if(base == 'T' || base == 't') sequence[j] = 'A'; else if(base == ' ') sequence[j] = ' '; else { fprintf(stderr, "Error: unexpected character '%c'\n", base); free(sequence); free(rvd_string_printable); fclose(gff_out_file); fclose(tab_out_file); if (genome_browser_file) { fclose(genome_browser_file); } return 1; } } strand = '-'; tab_strand = "Minus"; } fprintf( tab_out_file, "%s\t%s\t%.2lf\t%lu\t%s\t%s\n", site->sequence_name, tab_strand, site->score, site->index + 1, sequence, plus_strand_sequence); fprintf( gff_out_file, "%s\t%s\t%s\t%lu\t%lu\t%.2lf\t%c\t.\trvd_sequence=%s;target_sequence=%s;plus_strand_sequence=%s;\n", site->sequence_name, source_str, "TAL_effector_binding_site", site->index + 1, site->index + num_rvds, site->score, strand, rvd_string_printable, sequence, plus_strand_sequence); if(is_genome && i < 10000) { if(genome_using_gbrowse) { fprintf( genome_browser_file, "chr%s\t%s\t%s\t%lu\t%lu\t%.2lf\t%c\t.\tName=site%d;\n", site->sequence_name, source_str, "TAL_effector_binding_site", site->index + 1, site->index + num_rvds, site->score, strand, i); } else { int bed_score = floorf((best_score / site->score * 1000) + 0.5); fprintf( genome_browser_file,"%s\t%lu\t%lu\tsite%d\t%d\t%c\n", site->sequence_name, site->index, site->index + num_rvds - 1, i, bed_score, strand); } } if(plus_strand_sequence != sequence) { free(sequence); } } free(rvd_string_printable); fclose(gff_out_file); fclose(tab_out_file); if(is_genome) { fclose(genome_browser_file); } return 0; } double score_binding_site(kseq_t *seq, unsigned long i, unsigned int *rvd_seq, unsigned int rvd_seq_len, double **scoring_matrix, double *lookahead_array, int reverse) { double total_score = 0.0; int num_rvds = rvd_seq_len; if (!reverse) { for (unsigned long j = 0; j < rvd_seq_len; j++) { double *scores = scoring_matrix[rvd_seq[j]]; if (seq->seq.s[i+j] 
== 'A' || seq->seq.s[i+j] == 'a') total_score += scores[0]; else if (seq->seq.s[i+j] == 'C' || seq->seq.s[i+j] == 'c') total_score += scores[1]; else if (seq->seq.s[i+j] == 'G' || seq->seq.s[i+j] == 'g') total_score += scores[2]; else if (seq->seq.s[i+j] == 'T' || seq->seq.s[i+j] == 't') total_score += scores[3]; else total_score += lookahead_array[num_rvds - 1] + 1; if (total_score > lookahead_array[j]) return -1; } } else { for (unsigned long j = 0; j < rvd_seq_len; j++) { double *scores = scoring_matrix[rvd_seq[j]]; unsigned long k = i + rvd_seq_len - j - 2; if (seq->seq.s[k] == 'A' || seq->seq.s[k] == 'a') total_score += scores[3]; else if (seq->seq.s[k] == 'C' || seq->seq.s[k] == 'c') total_score += scores[2]; else if (seq->seq.s[k] == 'G' || seq->seq.s[k] == 'g') total_score += scores[1]; else if (seq->seq.s[k] == 'T' || seq->seq.s[k] == 't') total_score += scores[0]; else total_score += lookahead_array[num_rvds - 1] + 1; if (total_score > lookahead_array[j]) return -1; } } return total_score; } BindingSite *create_binding_site(kseq_t *seq, unsigned long i, int num_rvds, double score, int reverse) { int seq_name_len = strlen(seq->name.s); BindingSite *site = malloc(sizeof(BindingSite)); site->sequence = calloc(num_rvds + 2 + 1, sizeof(char)); site->sequence[num_rvds + 2] = '\0'; site->sequence_name = calloc(seq_name_len + 1, sizeof(char)); site->sequence_name[seq_name_len] = '\0'; strncpy(site->sequence_name, seq->name.s, seq_name_len); site->score = score; if (!(reverse == 1)) { site->strand = 1; site->index = i; strncpy(site->sequence, seq->seq.s + site->index - 1, 1); site->sequence[1] = ' '; strncpy(site->sequence + 2, seq->seq.s + site->index, num_rvds); } else { site->strand = -1; site->index = i - 1; strncpy(site->sequence, seq->seq.s + site->index, num_rvds); site->sequence[num_rvds] = ' '; strncpy(site->sequence + num_rvds + 1, seq->seq.s + site->index + num_rvds, 1); } for(int j = 0; j < num_rvds + 2 + 1; j++) { site->sequence[j] = 
toupper(site->sequence[j]); } return site; } void cpu_the_whole_shebang(kseq_t *seq, double *lookahead_array, Array *results) { int c_upstream = *((int *) hashmap_get(talesf_kwargs, "c_upstream")); int forward_only = *((int *) hashmap_get(talesf_kwargs, "forward_only")); unsigned int *rvd_seq = hashmap_get(talesf_kwargs, "rvd_seq"); unsigned int num_rvds = *((unsigned int *) hashmap_get(talesf_kwargs, "rvd_seq_len")); double **scoring_matrix = hashmap_get(talesf_kwargs, "scoring_matrix"); #pragma omp parallel for schedule(static) for(unsigned long i = 1; i <= seq->seq.l - num_rvds; i++) { if((c_upstream != 0 && (seq->seq.s[i-1] == 'C' || seq->seq.s[i-1] == 'c')) || (c_upstream != 1 && (seq->seq.s[i-1] == 'T' || seq->seq.s[i-1] == 't'))) { double score = score_binding_site(seq, i, rvd_seq, num_rvds, scoring_matrix, lookahead_array, 0); if(score != -1) { BindingSite *site = create_binding_site(seq, i, num_rvds, score, 0); #pragma omp critical (add_result) array_add(results, site); } } if(!forward_only) { if((c_upstream != 0 && (seq->seq.s[i + num_rvds - 1] == 'G' || seq->seq.s[i + num_rvds - 1] == 'g')) || (c_upstream != 1 && (seq->seq.s[i + num_rvds - 1] == 'A' || seq->seq.s[i + num_rvds - 1] == 'a'))) { double score = score_binding_site(seq, i, rvd_seq, num_rvds, scoring_matrix, lookahead_array, 1); if(score != -1) { BindingSite *site = create_binding_site(seq, i, num_rvds, score, 1); #pragma omp critical (add_result) array_add(results, site); } } } } } // Identify and print out TAL effector binding sites void find_binding_sites(FILE *log_file, kseq_t *seq, double *lookahead_array, Array *results) { unsigned int num_rvds = *((unsigned int *) hashmap_get(talesf_kwargs, "rvd_seq_len")); if(num_rvds > seq->seq.l) { logger(log_file, "Warning: skipping sequence '%s' since it is shorter than the RVD sequence\n", seq->seq.s); return; } logger(log_file, "Scanning %s for binding sites (length %ld)", seq->name.s, seq->seq.l); cpu_the_whole_shebang(seq, lookahead_array, 
results); } int run_talesf_task(Hashmap *kwargs) { talesf_kwargs = kwargs; // Options char *seq_filename = hashmap_get(kwargs, "seq_filename"); char *log_filepath = hashmap_get(kwargs, "log_filepath"); unsigned int *rvd_seq = hashmap_get(kwargs, "rvd_seq"); unsigned int rvd_seq_len = *((unsigned int *) hashmap_get(kwargs, "rvd_seq_len")); double best_score = *((double *) hashmap_get(kwargs, "best_score")); double cutoff = *((double *) hashmap_get(kwargs, "cutoff")); int numprocs = *((int *) hashmap_get(kwargs, "num_procs")); double **scoring_matrix = hashmap_get(kwargs, "scoring_matrix"); // Setup the logger FILE *log_file = stdout; if (log_filepath && strcmp(log_filepath, "NA") != 0) { log_file = fopen(log_filepath, "a"); } // Open sequence file gzFile seqfile; seqfile = gzopen(seq_filename, "r"); if (!seqfile) { logger(log_file, "Error: unable to open sequence '%s'", seq_filename); if (log_file != stdout) { fclose(log_file); } return 1; } Array *results = array_new( sizeof(BindingSite *) ); // Define score cutoffs for match sites double *lookahead_array = create_lookahead_array(rvd_seq, rvd_seq_len, cutoff, best_score, scoring_matrix); // Begin processing int abort = 0; omp_set_num_threads(numprocs); kseq_t *seq = kseq_init(seqfile); int result; while ((result = kseq_read(seq)) >= 0) { find_binding_sites(log_file, seq, lookahead_array, results); } kseq_destroy(seq); gzclose(seqfile); if(!abort) { qsort(results->data, array_size(results), sizeof(BindingSite *), binding_site_compare_score); abort = print_results(results, log_file); logger(log_file, "Finished"); } // Free memory if(results) { for(int i = 0; i < array_size(results); i++) { BindingSite *site = (BindingSite *)array_get(results, i); free(site->sequence); free(site->sequence_name); free(site); } array_delete(results, NULL); } if (lookahead_array) { free(lookahead_array); } if(log_file != stdout) { fclose(log_file); } return abort; }
nr_numint.c
/* Copyright 2014-2020 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <assert.h>
#include "config.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"

/* AO indices are processed in contiguous blocks of BOXSIZE so that blocks
 * made entirely of screened-out shells can be skipped in the BLAS calls. */
#define BOXSIZE 56

/* Mark which BOXSIZE-wide AO-index blocks contain only shells flagged zero
 * in non0table: empty[b] = 1 for a fully screened block.
 * Returns non-zero if at least one block is empty (so callers know whether
 * blockwise skipping pays off).  Returns 0 (no screening) when any of the
 * table pointers is NULL. */
int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice,
                       int *ao_loc)
{
        if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) {
                return 0;
        }

        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        int bas_id;
        int box_id = 0;
        int bound = BOXSIZE;
        int has0 = 0;
        empty[box_id] = 1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                empty[box_id] &= !non0table[bas_id];
                if (ao_loc[bas_id] == bound) {
                        // shell starts exactly on a block boundary
                        has0 |= empty[box_id];
                        box_id++;
                        bound += BOXSIZE;
                        empty[box_id] = 1;
                } else if (ao_loc[bas_id] > bound) {
                        // shell straddles the boundary: it contributes to the
                        // new block too
                        has0 |= empty[box_id];
                        box_id++;
                        bound += BOXSIZE;
                        empty[box_id] = !non0table[bas_id];
                }
        }
        return has0;
}

/* vm[nocc,bgrids] += ao[i,bgrids] . dm[i,nocc] for one grid block,
 * skipping AO blocks that are fully screened.  beta starts at 0 so the
 * first non-empty dgemm overwrites vm; later ones accumulate.  If every
 * block is empty, vm is zeroed explicitly. */
static void dot_ao_dm(double *vm, double *ao, double *dm,
                      int nao, int nocc, int ngrids, int bgrids,
                      unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        int nbox = (nao+BOXSIZE-1) / BOXSIZE;
        char empty[nbox];
        int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);

        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D1 = 1;
        double beta = 0;
        if (has0) {
                int box_id, blen, i, j;
                size_t b0;
                for (box_id = 0; box_id < nbox; box_id++) {
                        if (!empty[box_id]) {
                                b0 = box_id * BOXSIZE;
                                blen = MIN(nao-b0, BOXSIZE);
                                dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen,
                                       &D1, ao+b0*ngrids, &ngrids,
                                       dm+b0*nocc, &nocc,
                                       &beta, vm, &ngrids);
                                beta = 1.0;
                        }
                }
                if (beta == 0) { // all empty
                        for (i = 0; i < nocc; i++) {
                                for (j = 0; j < bgrids; j++) {
                                        vm[i*ngrids+j] = 0;
                                }
                        }
                }
        } else {
                dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao,
                       &D1, ao, &ngrids, dm, &nocc,
                       &beta, vm, &ngrids);
        }
}

/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc]
 * Parallelized over grid blocks of BLKSIZE points; each thread writes a
 * disjoint slice of vm, so no reduction is needed. */
void VXCdot_ao_dm(double *vm, double *ao, double *dm,
                  int nao, int nocc, int ngrids, int nbas,
                  unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel
{
        int ip, ib;
#pragma omp for nowait schedule(static)
        for (ib = 0; ib < nblk; ib++) {
                ip = ib * BLKSIZE;
                dot_ao_dm(vm+ip, ao+ip, dm,
                          nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE),
                          non0table+ib*nbas, shls_slice, ao_loc);
        }
}
}

/* vv[n,m] += ao1[n,bgrids] * ao2[m,bgrids] for one grid block, skipping
 * screened AO blocks.  With hermi != 0 only the lower block-triangle is
 * computed (caller symmetrizes afterwards). */
static void dot_ao_ao(double *vv, double *ao1, double *ao2,
                      int nao, int ngrids, int bgrids, int hermi,
                      unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        int nbox = (nao+BOXSIZE-1) / BOXSIZE;
        char empty[nbox];
        int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);

        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D1 = 1;
        if (has0) {
                int ib, jb, leni, lenj;
                int j1 = nbox;
                size_t b0i, b0j;

                for (ib = 0; ib < nbox; ib++) {
                        if (!empty[ib]) {
                                b0i = ib * BOXSIZE;
                                leni = MIN(nao-b0i, BOXSIZE);
                                if (hermi) {
                                        j1 = ib + 1;
                                }
                                for (jb = 0; jb < j1; jb++) {
                                        if (!empty[jb]) {
                                                b0j = jb * BOXSIZE;
                                                lenj = MIN(nao-b0j, BOXSIZE);
                                                dgemm_(&TRANS_T, &TRANS_N, &lenj, &leni, &bgrids,
                                                       &D1, ao2+b0j*ngrids, &ngrids,
                                                       ao1+b0i*ngrids, &ngrids,
                                                       &D1, vv+b0i*nao+b0j, &nao);
                                        }
                                }
                        }
                }
        } else {
                dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids,
                       &D1, ao2, &ngrids, ao1, &ngrids,
                       &D1, vv, &nao);
        }
}

/* vv[nao,nao] = ao1[i,nao] * ao2[i,nao]
 * Each thread accumulates into a private nao*nao buffer over its grid
 * blocks, then the buffers are summed under a critical section. */
void VXCdot_ao_ao(double *vv, double *ao1, double *ao2,
                  int nao, int ngrids, int nbas, int hermi,
                  unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        size_t Nao = nao;
        NPdset0(vv, Nao * Nao);
#pragma omp parallel
{
        int ip, ib;
        // NOTE(review): calloc result is not checked; a failed allocation
        // would crash in dot_ao_ao.
        double *v_priv = calloc(nao*nao+2, sizeof(double));
#pragma omp for nowait schedule(static)
        for (ib = 0; ib < nblk; ib++) {
                ip = ib * BLKSIZE;
                dot_ao_ao(v_priv, ao1+ip, ao2+ip,
                          nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi,
                          non0table+ib*nbas, shls_slice, ao_loc);
        }
#pragma omp critical
        {
                for (ip = 0; ip < nao*nao; ip++) {
                        vv[ip] += v_priv[ip];
                }
        }
        free(v_priv);
}
        if (hermi != 0) {
                // only the triangle was computed; mirror it
                NPdsymm_triu(nao, vv, hermi);
        }
}

// 'nip,np->ip'
// aow[i,p] = sum_n ao[n,i,p] * wv[n,p]  (comp = number of n components)
void VXC_dscale_ao(double *aow, double *ao, double *wv,
                   int comp, int nao, int ngrids)
{
#pragma omp parallel
{
        size_t Ngrids = ngrids;
        size_t ao_size = nao * Ngrids;
        int i, j, ic;
        double *pao = ao;
#pragma omp for schedule(static)
        for (i = 0; i < nao; i++) {
                pao = ao + i * Ngrids;
                // first component initializes, the rest accumulate
                for (j = 0; j < Ngrids; j++) {
                        aow[i*Ngrids+j] = pao[j] * wv[j];
                }
                for (ic = 1; ic < comp; ic++) {
                for (j = 0; j < Ngrids; j++) {
                        aow[i*Ngrids+j] += pao[ic*ao_size+j] * wv[ic*Ngrids+j];
                } }
        }
}
}

// 'ip,ip->p'
// rho[p] = sum_i bra[i,p] * ket[i,p]; grid range is split evenly over threads
void VXC_dcontract_rho(double *rho, double *bra, double *ket,
                       int nao, int ngrids)
{
#pragma omp parallel
{
        size_t Ngrids = ngrids;
        int nthread = omp_get_num_threads();
        int blksize = MAX((Ngrids+nthread-1) / nthread, 1);
        int ib, b0, b1, i, j;
#pragma omp for
        for (ib = 0; ib < nthread; ib++) {
                b0 = ib * blksize;
                b1 = MIN(b0 + blksize, ngrids);
                for (j = b0; j < b1; j++) {
                        rho[j] = bra[j] * ket[j];
                }
                for (i = 1; i < nao; i++) {
                for (j = b0; j < b1; j++) {
                        rho[j] += bra[i*Ngrids+j] * ket[i*Ngrids+j];
                } }
        }
}
}

/* VV10 non-local correlation kernel: for every grid point i accumulate the
 * pair interaction with all vvngrids points j, producing Fvec (energy
 * term, scaled by -1.5), Uvec and Wvec (potential terms). */
void VXC_vv10nlc(double *Fvec, double *Uvec, double *Wvec,
                 double *vvcoords, double *coords,
                 double *W0p, double *W0, double *K, double *Kp, double *RpW,
                 int vvngrids, int ngrids)
{
#pragma omp parallel
{
        double DX, DY, DZ, R2;
        double gp, g, gt, T, F, U, W;
        int i, j;
#pragma omp for schedule(static)
        for (i = 0; i < ngrids; i++) {
                F = 0;
                U = 0;
                W = 0;
                for (j = 0; j < vvngrids; j++) {
                        DX = vvcoords[j*3+0] - coords[i*3+0];
                        DY = vvcoords[j*3+1] - coords[i*3+1];
                        DZ = vvcoords[j*3+2] - coords[i*3+2];
                        R2 = DX*DX + DY*DY + DZ*DZ;
                        gp = R2*W0p[j] + Kp[j];
                        g  = R2*W0[i] + K[i];
                        gt = g + gp;
                        T = RpW[j] / (g*gp*gt);
                        F += T;
                        T *= 1./g + 1./gt;
                        U += T;
                        W += T * R2;
                }
                Fvec[i] = F * -1.5;
                Uvec[i] = U;
                Wvec[i] = W;
        }
}
}
mdatom.c
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 1991-2000, University of Groningen, The Netherlands. * Copyright (c) 2001-2004, The GROMACS development team, * check out http://www.gromacs.org for more information. * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. 
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "typedefs.h"
#include "mdatoms.h"
#include "smalloc.h"
#include "main.h"
#include "qmmm.h"
#include "mtop_util.h"
#include "gmx_omp_nthreads.h"

/* Inverse mass assigned to fully frozen atoms to avoid division by zero
 * in constraint algorithms (lincs/shake). */
#define ALMOST_ZERO 1e-30

/* Allocate a t_mdatoms and derive its global properties from the topology:
 * counts of (mass/charge) perturbed atoms for free energy, total A/B-state
 * masses, VCM-group and orientation-restraint flags.  Counters rely on
 * snew() zero-initializing the struct. */
t_mdatoms *init_mdatoms(FILE *fp, gmx_mtop_t *mtop, gmx_bool bFreeEnergy)
{
    int                     mb, a, g, nmol;
    double                  tmA, tmB;
    t_atom                 *atom;
    t_mdatoms              *md;
    gmx_mtop_atomloop_all_t aloop;
    t_ilist                *ilist;

    snew(md, 1);

    md->nenergrp = mtop->groups.grps[egcENER].nr;
    md->bVCMgrps = FALSE;
    tmA          = 0.0;
    tmB          = 0.0;
    aloop        = gmx_mtop_atomloop_all_init(mtop);
    while (gmx_mtop_atomloop_all_next(aloop, &a, &atom))
    {
        if (ggrpnr(&mtop->groups, egcVCM, a) > 0)
        {
            md->bVCMgrps = TRUE;
        }

        if (bFreeEnergy && PERTURBED(*atom))
        {
            md->nPerturbed++;
            if (atom->mB != atom->m)
            {
                md->nMassPerturbed++;
            }
            if (atom->qB != atom->q)
            {
                md->nChargePerturbed++;
            }
        }

        tmA += atom->m;
        tmB += atom->mB;
    }

    md->tmassA = tmA;
    md->tmassB = tmB;

    if (bFreeEnergy && fp)
    {
        fprintf(fp,
                "There are %d atoms and %d charges for free energy perturbation\n",
                md->nPerturbed, md->nChargePerturbed);
    }

    md->bOrires = gmx_mtop_ftype_count(mtop, F_ORIRES);

    return md;
}

/* Fill the per-atom arrays of md for the local atom set given by
 * index/nindex (or the whole system when index == NULL), reallocating the
 * arrays if the set grew.  Masses are adapted for energy minimization and
 * BD integrators; group indices (T-coupling, energy, freeze, ...) are
 * looked up from the global topology. */
void atoms2md(gmx_mtop_t *mtop, t_inputrec *ir,
              int nindex, int *index,
              int start, int homenr,
              t_mdatoms *md)
{
    gmx_mtop_atomlookup_t alook;
    int                   i;
    t_grpopts            *opts;
    gmx_groups_t         *groups;
    gmx_molblock_t       *molblock;

    opts = &ir->opts;

    groups = &mtop->groups;

    molblock = mtop->molblock;

    /* Index==NULL indicates particle decomposition,
     * unless we have an empty DD node, so also check for homenr and start.
     * This should be signaled properly with an extra parameter or nindex==-1.
     */
    if (index == NULL && (homenr > 0 || start > 0))
    {
        md->nr = mtop->natoms;
    }
    else
    {
        md->nr = nindex;
    }

    if (md->nr > md->nalloc)
    {
        md->nalloc = over_alloc_dd(md->nr);

        if (md->nMassPerturbed)
        {
            srenew(md->massA, md->nalloc);
            srenew(md->massB, md->nalloc);
        }
        srenew(md->massT, md->nalloc);
        srenew(md->invmass, md->nalloc);
        srenew(md->chargeA, md->nalloc);
        if (md->nPerturbed)
        {
            srenew(md->chargeB, md->nalloc);
        }
        srenew(md->typeA, md->nalloc);
        if (md->nPerturbed)
        {
            srenew(md->typeB, md->nalloc);
        }
        srenew(md->ptype, md->nalloc);
        if (opts->ngtc > 1)
        {
            srenew(md->cTC, md->nalloc);
            /* We always copy cTC with domain decomposition */
        }
        srenew(md->cENER, md->nalloc);
        if (opts->ngacc > 1)
        {
            srenew(md->cACC, md->nalloc);
        }
        if (opts->nFreeze &&
            (opts->ngfrz > 1 ||
             opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ]))
        {
            srenew(md->cFREEZE, md->nalloc);
        }
        if (md->bVCMgrps)
        {
            srenew(md->cVCM, md->nalloc);
        }
        if (md->bOrires)
        {
            srenew(md->cORF, md->nalloc);
        }
        if (md->nPerturbed)
        {
            srenew(md->bPerturbed, md->nalloc);
        }

        /* Note that these user t_mdatoms array pointers are NULL
         * when there is only one group present.
         * Therefore, when adding code, the user should use something like:
         * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
         */
        if (mtop->groups.grpnr[egcUser1] != NULL)
        {
            srenew(md->cU1, md->nalloc);
        }
        if (mtop->groups.grpnr[egcUser2] != NULL)
        {
            srenew(md->cU2, md->nalloc);
        }

        if (ir->bQMMM)
        {
            srenew(md->bQM, md->nalloc);
        }
        if (ir->bAdress)
        {
            srenew(md->wf, md->nalloc);
            srenew(md->tf_table_index, md->nalloc);
        }
    }

    alook = gmx_mtop_atomlookup_init(mtop);

#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntDefault)) schedule(static)
    for (i = 0; i < md->nr; i++)
    {
        int      g, ag, molb;
        real     mA, mB, fac;
        t_atom  *atom;

        if (index == NULL)
        {
            ag = i;
        }
        else
        {
            ag = index[i];
        }
        gmx_mtop_atomnr_to_atom(alook, ag, &atom);

        if (md->cFREEZE)
        {
            md->cFREEZE[i] = ggrpnr(groups, egcFREEZE, ag);
        }
        if (EI_ENERGY_MINIMIZATION(ir->eI))
        {
            /* Displacement is proportional to F, masses used for constraints */
            mA = 1.0;
            mB = 1.0;
        }
        else if (ir->eI == eiBD)
        {
            /* With BD the physical masses are irrelevant.
             * To keep the code simple we use most of the normal MD code path
             * for BD. Thus for constraining the masses should be proportional
             * to the friction coefficient. We set the absolute value such that
             * m/2<(dx/dt)^2> = m/2*2kT/fric*dt = kT/2 => m=fric*dt/2
             * Then if we set the (meaningless) velocity to v=dx/dt, we get the
             * correct kinetic energy and temperature using the usual code path.
             * Thus with BD v*dt will give the displacement and the reported
             * temperature can signal bad integration (too large time step).
             */
            if (ir->bd_fric > 0)
            {
                mA = 0.5*ir->bd_fric*ir->delta_t;
                mB = 0.5*ir->bd_fric*ir->delta_t;
            }
            else
            {
                /* The friction coefficient is mass/tau_t */
                fac = ir->delta_t/opts->tau_t[md->cTC ? groups->grpnr[egcTC][ag] : 0];
                mA  = 0.5*atom->m*fac;
                mB  = 0.5*atom->mB*fac;
            }
        }
        else
        {
            mA = atom->m;
            mB = atom->mB;
        }
        if (md->nMassPerturbed)
        {
            md->massA[i] = mA;
            md->massB[i] = mB;
        }
        md->massT[i] = mA;
        if (mA == 0.0)
        {
            md->invmass[i] = 0;
        }
        else if (md->cFREEZE)
        {
            g = md->cFREEZE[i];
            if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
            {
                /* Set the mass of completely frozen particles to ALMOST_ZERO iso 0
                 * to avoid div by zero in lincs or shake.
                 * Note that constraints can still move a partially frozen particle.
                 */
                md->invmass[i] = ALMOST_ZERO;
            }
            else
            {
                md->invmass[i] = 1.0/mA;
            }
        }
        else
        {
            md->invmass[i] = 1.0/mA;
        }
        md->chargeA[i] = atom->q;
        md->typeA[i]   = atom->type;
        if (md->nPerturbed)
        {
            md->chargeB[i]    = atom->qB;
            md->typeB[i]      = atom->typeB;
            md->bPerturbed[i] = PERTURBED(*atom);
        }
        md->ptype[i] = atom->ptype;
        if (md->cTC)
        {
            md->cTC[i] = groups->grpnr[egcTC][ag];
        }
        md->cENER[i] = (groups->grpnr[egcENER] ? groups->grpnr[egcENER][ag] : 0);
        if (md->cACC)
        {
            md->cACC[i] = groups->grpnr[egcACC][ag];
        }
        if (md->cVCM)
        {
            md->cVCM[i] = groups->grpnr[egcVCM][ag];
        }
        if (md->cORF)
        {
            md->cORF[i] = groups->grpnr[egcORFIT][ag];
        }

        if (md->cU1)
        {
            md->cU1[i] = groups->grpnr[egcUser1][ag];
        }
        if (md->cU2)
        {
            md->cU2[i] = groups->grpnr[egcUser2][ag];
        }

        if (ir->bQMMM)
        {
            /* An atom is QM when it is not in the last (MM) QMMM group */
            if (groups->grpnr[egcQMMM] == 0 ||
                groups->grpnr[egcQMMM][ag] < groups->grps[egcQMMM].nr-1)
            {
                md->bQM[i] = TRUE;
            }
            else
            {
                md->bQM[i] = FALSE;
            }
        }
        /* Initialize AdResS weighting functions to adressw */
        if (ir->bAdress)
        {
            md->wf[i] = 1.0;
            /* if no tf table groups specified, use default table */
            md->tf_table_index[i] = DEFAULT_TF_TABLE;
            if (ir->adress->n_tf_grps > 0)
            {
                /* if tf table groups specified, tf is only applied to thoose energy groups*/
                md->tf_table_index[i] = NO_TF_TABLE;
                /* check wether atom is in one of the relevant energy groups and assign a table index */
                for (g = 0; g < ir->adress->n_tf_grps; g++)
                {
                    if (md->cENER[i] == ir->adress->tf_table_index[g])
                    {
                        md->tf_table_index[i] = g;
                    }
                }
            }
        }
    }

    gmx_mtop_atomlookup_destroy(alook);

    md->start  = start;
    md->homenr = homenr;
    md->lambda = 0;
}

/* Re-interpolate the masses of perturbed atoms (and the total mass) for
 * the current free-energy coupling parameter lambda:
 * m(lambda) = (1-lambda)*mA + lambda*mB. */
void update_mdatoms(t_mdatoms *md, real lambda)
{
    int    al, end;
    real   L1 = 1.0-lambda;

    end = md->nr;

    if (md->nMassPerturbed)
    {
        for (al = 0; (al < end); al++)
        {
            if (md->bPerturbed[al])
            {
                md->massT[al] = L1*md->massA[al]+ lambda*md->massB[al];
                /* Atoms with invmass near ALMOST_ZERO are frozen; keep them so */
                if (md->invmass[al] > 1.1*ALMOST_ZERO)
                {
                    md->invmass[al] = 1.0/md->massT[al];
                }
            }
        }
        md->tmass = L1*md->tmassA + lambda*md->tmassB;
    }
    else
    {
        md->tmass = md->tmassA;
    }
    md->lambda = lambda;
}
queue.h
// -*- C++ -*- // Copyright (C) 2007-2020 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/queue.h * @brief Lock-free double-ended queue. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUEUE_H #define _GLIBCXX_PARALLEL_QUEUE_H 1 #include <parallel/types.h> #include <parallel/base.h> #include <parallel/compatibility.h> /** @brief Decide whether to declare certain variable volatile in this file. */ #define _GLIBCXX_VOLATILE volatile namespace __gnu_parallel { /**@brief Double-ended queue of bounded size, allowing lock-free * atomic access. push_front() and pop_front() must not be called * concurrently to each other, while pop_back() can be called * concurrently at all times. * @c empty(), @c size(), and @c top() are intentionally not provided. * Calling them would not make sense in a concurrent setting. * @param _Tp Contained element type. 
 */
  template<typename _Tp>
    class _RestrictedBoundedConcurrentQueue
    {
    private:
      /** @brief Array of elements, seen as cyclic buffer. */
      _Tp* _M_base;

      /** @brief Maximal number of elements contained at the same time. */
      _SequenceIndex _M_max_size;

      /** @brief Cyclic __begin and __end pointers contained in one
          atomically changeable value.  The front index grows on
          push_front(), shrinks on pop_front(); the back index grows on
          pop_back().  Packing both into a single _CASable word is what
          allows lock-free updates via CAS / fetch-and-add. */
      _GLIBCXX_VOLATILE _CASable _M_borders;

    public:
      /** @brief Constructor. Not to be called concurrent, of course.
       *  @param __max_size Maximal number of elements to be contained. */
      _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size)
      {
        _M_max_size = __max_size;
        _M_base = new _Tp[__max_size];
        _M_borders = __encode2(0, 0);
#pragma omp flush
      }

      /** @brief Destructor. Not to be called concurrent, of course. */
      ~_RestrictedBoundedConcurrentQueue()
      { delete[] _M_base; }

      /** @brief Pushes one element into the queue at the front end.
       *  Must not be called concurrently with pop_front(). */
      void
      push_front(const _Tp& __t)
      {
        _CASable __former_borders = _M_borders;
        int __former_front, __former_back;
        __decode2(__former_borders, __former_front, __former_back);
        // Store the element first; only then publish it by bumping the
        // front index, so concurrent pop_back() never sees an
        // unwritten slot.
        *(_M_base + __former_front % _M_max_size) = __t;
#if _GLIBCXX_PARALLEL_ASSERTIONS
        // Otherwise: front - back > _M_max_size eventually.
        _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back)
                                 <= _M_max_size);
#endif
        __fetch_and_add(&_M_borders, __encode2(1, 0));
      }

      /** @brief Pops one element from the queue at the front end.
       *  Must not be called concurrently with push_front().
       *  (NOTE(review): the original comment said "with pop_front()",
       *  which contradicts the class-level documentation.) */
      bool
      pop_front(_Tp& __t)
      {
        int __former_front, __former_back;
#pragma omp flush
        __decode2(_M_borders, __former_front, __former_back);
        // Retry while the queue is non-empty and some pop_back() beats
        // us to the CAS; give up as soon as it is observed empty.
        while (__former_front > __former_back)
          {
            // Chance.
            _CASable __former_borders = __encode2(__former_front,
                                                  __former_back);
            _CASable __new_borders = __encode2(__former_front - 1,
                                               __former_back);
            if (__compare_and_swap(&_M_borders, __former_borders,
                                   __new_borders))
              {
                __t = *(_M_base + (__former_front - 1) % _M_max_size);
                return true;
              }
#pragma omp flush
            __decode2(_M_borders, __former_front, __former_back);
          }
        return false;
      }

      /** @brief Pops one element from the queue at the back end.
       *  Can be called concurrently at all times (see class
       *  documentation).
       *  (NOTE(review): the original comment was a copy-paste of the
       *  pop_front() description.) */
      bool
      pop_back(_Tp& __t)        //queue behavior
      {
        int __former_front, __former_back;
#pragma omp flush
        __decode2(_M_borders, __former_front, __former_back);
        while (__former_front > __former_back)
          {
            // Chance.
            _CASable __former_borders = __encode2(__former_front,
                                                  __former_back);
            _CASable __new_borders = __encode2(__former_front,
                                               __former_back + 1);
            if (__compare_and_swap(&_M_borders, __former_borders,
                                   __new_borders))
              {
                __t = *(_M_base + __former_back % _M_max_size);
                return true;
              }
#pragma omp flush
            __decode2(_M_borders, __former_front, __former_back);
          }
        return false;
      }
    };
}       //namespace __gnu_parallel

#undef _GLIBCXX_VOLATILE

#endif /* _GLIBCXX_PARALLEL_QUEUE_H */
Searching.202002250815.buckets_equal_width.h
// // Created by Zhen Peng on 02/25/2020. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> #include <algorithm> #include <omp.h> #include "../../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../../include/utils.h" #include "../../include/Candidate.h" #include "../../include/parallelization.h" //#include "../include/Buckets.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; int dimension_ = 0; // uint64_t dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, const Candidate &cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, 
const idi queue_size, const PANNS::Candidate &cand); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); // template<typename T> static void insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue_base, const idi insert_index, const idi queue_start, const idi queue_size); static idi merge_two_queues_into_1st_queue_seq( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); // const idi limit_size); static idi merge_two_queues_into_1st_queue_para( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); static void merge_two_bkt_arrays_into_1st_bkt_array_seq( std::vector< std::vector<Candidate> > &a_array, // idi a_bound, std::vector< std::vector<Candidate> > &b_array, const idi b_bound); static idi add_into_CandidatesBuckets( const Candidate &cand, std::vector< std::vector<Candidate> > &buckets, const distf bucket_lower, const distf overall_width, const distf bucket_width); idi merge_all_queues_para( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L); idi merge_all_buckets_para( std::vector< std::vector< std::vector<Candidate> > > &local_buckets_list, std::vector<idi> &local_insert_locations, std::vector< std::vector<Candidate> > &global_buckets, const idi num_buckets); public: // For Profiling // L3CacheMissRate cache_miss_kernel; uint64_t count_distance_computation_ = 0; ~Searching() { free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void 
load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list); void para_search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void 
para_prepare_init_ids( // std::vector<unsigned> &init_ids, // unsigned L) const; void load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list); void get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const; }; // Class Searching /** * Input the data from the file. * @param filename */ inline void Searching::load_data_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, data_load_, num_v_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: data dimension " << dimension_ << " is not equal to query dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input queries from the file. * @param filename */ inline void Searching::load_queries_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, queries_load_, num_queries_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: query dimension " << dimension_ << " is not equal to data dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input the NSG graph from the file. * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp * @param filename */ inline void Searching::load_nsg_graph(char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { std::cerr << "Error: cannot read file " << filename << " ." << std::endl; exit(EXIT_FAILURE); } fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned)); fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned)); data_bytes_ = (1 + dimension_) * sizeof(dataf); neighbor_bytes_ = (1 + width_) * sizeof(idi); vertex_bytes_ = data_bytes_ + neighbor_bytes_; opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_); if (!opt_nsg_graph_) { std::cerr << "Error: no enough memory for opt_nsg_graph_." 
<< std::endl; exit(EXIT_FAILURE); } idi v_id = 0; num_e_ = 0; char *base_location = opt_nsg_graph_; while (true) { idi degree; fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); if (fin.eof()) { break; } num_e_ += degree; // std::vector<idi> tmp_ngbrs(degree); // fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned)); // Norm and data distf norm = compute_norm(data_load_ + v_id * dimension_); // distf norm = compute_norm(v_id); std::memcpy(base_location, &norm, sizeof(distf)); // Norm memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data base_location += data_bytes_; // Neighbors memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors // memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned)); base_location += neighbor_bytes_; ++v_id; } if (v_id != num_v_) { std::cerr << "Error: NSG data has " << v_id << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; exit(EXIT_FAILURE); } free(data_load_); data_load_ = nullptr; // //////////////////////// // idi v_id = 0; // num_e_ = 0; // while (true) { // idi degree; // fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); // if (fin.eof()) { // break; // } // num_e_ += degree; // // std::vector<idi> ngbrs(degree); // fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned)); //// nsg_graph_.push_back(ngbrs); //// tmp_edge_list.push_back(ngbrs); // edge_list_.push_back(ngbrs); // ++v_id; // } // if (v_id != num_v_) { // std::cerr << "Error: NSG data has " << v_id // << " vertices, but origin data has " << num_v_ << " vertices." 
<< std::endl; // exit(EXIT_FAILURE); // } } /** * Load those true top-K neighbors (ground truth) of queries * @param filename * @param[out] true_nn_list */ inline void Searching::load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list) // unsigned &t_K) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi t_query_num; idi t_K; // unsigned t_K; fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num)); fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K)); // if (t_query_num != query_num) { // fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n", // query_num, t_query_num, filename); // exit(EXIT_FAILURE); // } if (t_query_num < num_queries_) { fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_); exit(EXIT_FAILURE); } if (t_K < 100) { fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); exit(EXIT_FAILURE); } // data = new unsigned[(size_t) t_query_num * (size_t) t_K]; true_nn_list.resize(t_query_num); for (idi q_i = 0; q_i < t_query_num; ++q_i) { true_nn_list[q_i].resize(t_K); } for (unsigned q_i = 0; q_i < t_query_num; ++q_i) { // size_t offset = q_i * t_K; for (unsigned n_i = 0; n_i < t_K; ++n_i) { unsigned id; float dist; fin.read(reinterpret_cast<char *>(&id), sizeof(id)); fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // data[offset + n_i] = id; true_nn_list[q_i][n_i] = id; } } fin.close(); } inline void Searching::get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const { // if (t_K < 100) { // fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); // exit(EXIT_FAILURE); // } if (true_nn_list[0].size() < 100) { fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 
100.\n"); exit(EXIT_FAILURE); } recalls[5] = 0.0; recalls[10] = 0.0; recalls[20] = 0.0; recalls[50] = 0.0; recalls[100] = 0.0; for (unsigned q_i = 0; q_i < num_queries_; ++q_i) { // size_t offset = q_i * t_K; auto &set_K_q_i = set_K_list[q_i]; for (unsigned top_i = 0; top_i < 100; ++top_i) { unsigned true_id = true_nn_list[q_i][top_i]; for (unsigned n_i = 0; n_i < 100; ++n_i) { if (set_K_q_i[n_i] == true_id) { if (n_i < 5) recalls[5] += 1; if (n_i < 10) recalls[10] += 1; if (n_i < 20) recalls[20] += 1; if (n_i < 50) recalls[50] += 1; if (n_i < 100) recalls[100] += 1; } } } } recalls[5] /= 5.0 * num_queries_; recalls[10] /= 10.0 * num_queries_; recalls[20] /= 20.0 * num_queries_; recalls[50] /= 50.0 * num_queries_; recalls[100] /= 100.0 * num_queries_; } inline void Searching::search_in_sequential( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { // std::vector<Candidate> set_L(L+1); // std::vector<idi> init_ids(L); boost::dynamic_bitset<> is_visited(num_v_); for (idi v_i = 0; v_i < L; ++v_i) { is_visited[init_ids[v_i]] = true; } // { // idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); // unsigned out_degree = *out_edges++; // idi tmp_l = 0; // for (; tmp_l < L && tmp_l < out_degree; tmp_l++) { // init_ids[tmp_l] = out_edges[tmp_l]; // } // // for (idi i = 0; i < tmp_l; ++i) { // is_visited[init_ids[i]] = true; // } // // // If ep_'s neighbors are not enough, add other random vertices // idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
// while (tmp_l < L) { // tmp_id %= num_v_; // unsigned id = tmp_id++; // if (is_visited[id]) { // continue; // } // is_visited[id] = true; // init_ids[tmp_l] = id; // tmp_l++; // } // } // const std::vector<dataf> &query = queries_load_[query_id]; // std::vector<char> is_checked(L + 1, 0); // boost::dynamic_bitset<> is_checked(num_v_); // cache_miss_kernel.measure_stop(); // cache_miss_kernel.measure_start(); const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; // _mm_prefetch(reinterpret_cast<char *>(data_load_ + v_id * dimension_), _MM_HINT_T0); _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); // cache_miss_kernel.measure_stop(); // cache_miss_kernel.measure_start(); idi k = 0; // Index of every queue's first unchecked candidate. while (k < L) { Candidate &top_cand = set_L[k]; unsigned nk = L; if (!top_cand.is_checked_) { top_cand.is_checked_ = true; idi v_id = top_cand.id_; // Vertex ID. 
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } // Traverse v_id's all neighbors, pushing them into the queue for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // Compute the distance // ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Insert into the queue idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } if (nk <= k) { k = nk; } else { ++k; } } // cache_miss_kernel.measure_stop(); for (size_t k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } } /** * Prepare init_ids and flags, as they are constant for all queries. 
 * @param[out] init_ids receives up to L distinct starting vertex IDs
 * @param L number of initial candidates to produce
 */
inline void Searching::prepare_init_ids(
        std::vector<unsigned int> &init_ids,
        unsigned L) const
{
//    idi num_ngbrs = get_out_degree(ep_);
//    edgei edge_start = nsg_graph_indices_[ep_];
//    // Store ep_'s neighbors as candidates
//    idi tmp_l = 0;
//    for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
//        init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
//    }
//    std::unordered_set<idi> visited_ids;
    // Deduplication bitmap over all vertices.
    boost::dynamic_bitset<> is_selected(num_v_);
    // The neighbor list of the entry point ep_ lives right after its
    // norm+data payload inside the packed opt_nsg_graph_ layout; the
    // first idi stored there is the out-degree.
    idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
    idi out_degree = *out_edges++;
    idi init_ids_end = 0;
//    for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
    // Seed init_ids with ep_'s (distinct) out-neighbors, at most L.
    for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
//        idi v_id = out_edges[tmp_l];
        idi v_id = out_edges[e_i];
        if(is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
//        init_ids[tmp_l] = v_id;
        init_ids[init_ids_end++] = v_id;
//        init_ids[tmp_l] = out_edges[tmp_l];
//        visited_ids.insert(init_ids[tmp_l]);
    }

//    for (idi i = 0; i < tmp_l; ++i) {
//        is_visited[init_ids[i]] = true;
//    }

    // If ep_'s neighbors are not enough, add other random vertices
    idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
while (init_ids_end < L) { tmp_id %= num_v_; idi v_id = tmp_id++; if (is_selected[v_id]) { continue; } // if (visited_ids.find(id) != visited_ids.end()) { // continue; // } is_selected[v_id] = true; // visited_ids.insert(id); init_ids[init_ids_end++] = v_id; // tmp_l++; } } // TODO: re-code in AVX-512 inline dataf Searching::compute_norm( const dataf *data) const // idi vertex_id) // const std::vector<PANNS::dataf> &data) // size_t loc_start, // idi dimension) { // const dataf *a = data.data() + loc_start; // const dataf *a = data_load_ + vertex_id * dimension_; // idi size = dimension_; dataf result = 0; //#define AVX_L2NORM(addr, dest, tmp) \ // tmp = _mm256_load_ps(addr); \ // tmp = _mm256_mul_ps(tmp, tmp); \ // dest = _mm256_add_ps(dest, tmp); #define AVX_L2NORM(addr, dest, tmp) \ tmp = _mm256_loadu_ps(addr); \ tmp = _mm256_mul_ps(tmp, tmp); \ dest = _mm256_add_ps(dest, tmp); __m256 sum; __m256 l0, l1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = data; const float *e_l = l + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 16, l += 16) { AVX_L2NORM(l, sum, l0); AVX_L2NORM(l + 8, sum, l1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; return result; } inline dataf Searching::compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<PANNS::dataf> &d_data, // const std::vector<PANNS::dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, dataf vertex_norm) const // idi dimension) { // idi size = dimension_; float result = 0; //#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ // tmp1 = _mm256_load_ps(addr1);\ // tmp2 = _mm256_load_ps(addr2);\ // tmp1 = 
_mm256_mul_ps(tmp1, tmp2); \ // dest = _mm256_add_ps(dest, tmp1); #define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm256_loadu_ps(addr1);\ tmp2 = _mm256_loadu_ps(addr2);\ tmp1 = _mm256_mul_ps(tmp1, tmp2); \ dest = _mm256_add_ps(dest, tmp1); __m256 sum; __m256 l0, l1; __m256 r0, r1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = v_data; const float *r = q_data; // const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf)); // const float *r = queries_load_ + query_id * dimension_; const float *e_l = l + DD; const float *e_r = r + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { AVX_DOT(l, r, sum, l0, r0); AVX_DOT(l + 8, r + 8, sum, l1, r1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; result = -2 * result + vertex_norm; return result; } /** * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move. 
 * @param[out] c_queue sorted (ascending by distance_) fixed-size candidate queue
 * @param c_queue_top number of valid elements currently in c_queue
 * @param cand candidate to insert
 * @return index at which cand was placed, or c_queue_top if it was not inserted
 */
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        const PANNS::Candidate &cand)
{
    // NOTE(review): the memmove calls assume Candidate is trivially
    // copyable and that c_queue has room for c_queue_top + 1 elements
    // — confirm against the callers' allocation.
    if (c_queue[0].distance_ > cand.distance_) { // If the first
        // cand beats every element: shift the whole queue right by one
        // and put cand at the head.
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) { // If the last
        if (c_queue[c_queue_top - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            return c_queue_top;  // Not inserted.
        }
    }

    // Binary search for the first element whose distance exceeds cand's.
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }

    // If the distance is the same
    if (0 != left && c_queue[left - 1].distance_ != cand.distance_) {
        ;
    } else {
        // Walk left across the run of equal distances so ties are
        // ordered by ascending id_.
        while (0 != left
               && c_queue[left - 1].distance_ == cand.distance_
               && c_queue[left - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering
            --left;
        }
    }

    // Insert to left
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}

// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
// Insert cand into the sorted queue [0, queue_top), growing queue_top by
// one unless the queue is already full (queue_top == queue_size), in
// which case the last element is dropped to make room.  Returns the
// insert position, or queue_size if cand belongs past the end.
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    if (0 == queue_top) {
        queue[queue_top++] = cand;
        return 0;
    }
    // Find the insert location
    auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
    idi insert_loc = it_loc - queue.begin();
    if (insert_loc == queue_size) {
        return queue_size;  // Worse than everything in a full queue.
    }

    // Insert
    if (queue_top == queue_size) { // If full already
        --queue_top;  // Drop the current worst element.
    }
    // NOTE(review): memmove assumes Candidate is trivially copyable.
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_top - insert_loc) * sizeof(Candidate));
//    for (idi q_i = queue_top; q_i > insert_loc; --q_i) {
//        queue.at(q_i) = queue.at(q_i - 1);
//    }
    queue[insert_loc] = cand;
    ++queue_top;

    return insert_loc;
}

//template<typename T>
// Shift the sub-queue [queue_start + insert_index, queue_start + queue_size)
// right by one slot (discarding its last element) and write cand at
// queue_start + insert_index.  Helper for the merge routines below.
inline void Searching::insert_one_element_at(
//        const T &cand,
//        T *queue_base,
        const Candidate &cand,
        std::vector<Candidate> &queue,
        const idi insert_index,
        const idi queue_start,
        const idi queue_size)
{
    const idi dest_index = queue_start + insert_index;
    memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
            reinterpret_cast<char *>(queue.data() + dest_index),
            (queue_size - insert_index - 1) * sizeof(Candidate));
    queue[dest_index] = cand;
//    memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
//            reinterpret_cast<char *>(queue_base + dest_index),
//            (queue_size - insert_index - 1) * sizeof(T));
//    for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
//        queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
//    }
//    queue_base[dest_index] = cand;
}

// Sequentially merge the sorted sub-queue of queue2 into that of queue1
// (queue1 keeps its size; worst elements fall off the end).  Returns the
// lowest index in queue1 that was modified.
inline idi Searching::merge_two_queues_into_1st_queue_seq(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { return insert_index; } else if (insert_index == queue1_size - 1) { queue1[queue1_start + insert_index] = queue2[queue2_start]; return insert_index; } // auto *queue1_base = queue1.data() + queue1_start; // Insert the 1st of queue2 insert_one_element_at( queue2[queue2_start], // queue1.data(), queue1, insert_index, queue1_start, queue1_size); if (queue2_size == 1) { return insert_index; } // memmove(reinterpret_cast<char *>(queue1_base + insert_index + 1), // reinterpret_cast<char *>(queue1_base + insert_index), // (queue1_size - insert_index) * sizeof(Candidate)); // queue1[insert_index] = queue2[queue2_start]; // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; const idi q_i_1_bound = queue1_start + queue1_size; const idi q_i_2_bound = queue2_start + queue2_size; // const idi insert_i_bound = queue1_start + limit_size; for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) { // for (idi insert_i = insert_index + 1; insert_i < q_i_1_bound; ++insert_i) { if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) { // queue1 or queue2 finished traverse. Rest o break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else { // Insert queue2[q_i_2] into queue1 insert_one_element_at( queue2[q_i_2++], // queue1.data(), queue1, insert_i, queue1_start, queue1_size); ++q_i_1; } } // // // Merge queue1 and queue2 into tmp_queue. // std::vector<Candidate> tmp_queue(queue1_size + queue2_size); // std::merge(queue1.begin() + queue1_start, // queue1.begin() + queue1_start + queue1_size, // queue2.begin() + queue2_start, // queue2.begin() + queue2_start + queue2_size, // tmp_queue.begin()); // // Resize tmp_queue. 
// tmp_queue.resize(limit_size); // // // Swap queue1 and tmp_queue // queue1.swap(tmp_queue); return insert_index; } //// Backup //inline idi Searching::merge_two_queues_into_1st_queue_seq( // std::vector<Candidate> &queue1, // const idi queue1_start, // const idi queue1_size, // std::vector<Candidate> &queue2, // const idi queue2_start, // const idi queue2_size, // const idi limit_size) //{ // assert(queue1_size); // // Record the lowest insert location. // auto it_loc = std::lower_bound( // queue1.begin() + queue1_start, // queue1.begin() + queue1_start + queue1_size, // queue2[queue2_start]); // idi insert_loc = it_loc - (queue1.begin() + queue1_start); // // // Merge queue1 and queue2 into tmp_queue. // std::vector<Candidate> tmp_queue(queue1_size + queue2_size); // std::merge(queue1.begin() + queue1_start, // queue1.begin() + queue1_start + queue1_size, // queue2.begin() + queue2_start, // queue2.begin() + queue2_start + queue2_size, // tmp_queue.begin()); // // Resize tmp_queue. // tmp_queue.resize(limit_size); // // // Swap queue1 and tmp_queue // queue1.swap(tmp_queue); // // return insert_loc; //} inline idi Searching::merge_two_queues_into_1st_queue_para( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); if (queue2_size < 16) { // if (true) { // if (queue2_size < 1024) { return merge_two_queues_into_1st_queue_seq( queue1, queue1_start, queue1_size, queue2, queue2_start, queue2_size); } else { auto it1_begin = queue1.begin() + queue1_start; auto it1_end = it1_begin + queue1_size; // auto it2_begin = queue2.begin() + queue2_start; // auto it2_end = it2_begin + queue2_size; // Record the lowest insert location. 
idi lowest_insert_loc; { auto it_loc = std::lower_bound( it1_begin, it1_end, queue2[queue2_start]); lowest_insert_loc = it_loc - it1_begin; if (lowest_insert_loc == queue1_size) { return lowest_insert_loc; } else if (lowest_insert_loc == queue1_size - 1) { queue1[queue1_start + lowest_insert_loc] = queue2[queue2_start]; return lowest_insert_loc; } } // Partition queue2 and queue1 const idi stride = log2(static_cast<double>(queue2_size)); const idi num_partitions = (queue2_size - 1) / stride + 1; // Partitions in queue1 std::vector<idi> parts_in_q1(num_partitions); parts_in_q1[0] = 0; #pragma omp parallel for for (idi p_i = 1; p_i < num_partitions; ++p_i) { idi q_i = p_i * stride + queue2_start; auto it_loc = std::lower_bound( it1_begin, it1_end, queue2[q_i]); parts_in_q1[p_i] = it_loc - it1_begin; } // Partitions in queue2 std::vector<idi> parts_in_q2(num_partitions); parts_in_q2[0] = 0; #pragma omp parallel for for (idi p_i = 1; p_i < num_partitions; ++p_i) { parts_in_q2[p_i] = p_i * stride; } // Merge // Find partitions needed. std::vector<idi> offsets_queue3; // For the tmp_queue when merging. 
offsets_queue3.push_back(0); idi elements_count = 0; idi p_i_bound = 0; while (elements_count < queue1_size && p_i_bound < num_partitions) { idi q1_size; idi q2_size; if (p_i_bound != num_partitions - 1) { q1_size = parts_in_q1[p_i_bound + 1] - parts_in_q1[p_i_bound]; q2_size = parts_in_q2[p_i_bound + 1] - parts_in_q2[p_i_bound]; } else { q1_size = queue1_size - parts_in_q1[p_i_bound]; q2_size = queue2_size - parts_in_q2[p_i_bound]; } ++p_i_bound; idi tmp_size = q1_size + q2_size; elements_count += tmp_size; offsets_queue3.push_back(*offsets_queue3.rbegin() + tmp_size); } // Merge into tmp_queue in parallel std::vector<Candidate> tmp_queue(elements_count); #pragma omp parallel for for (idi p_i = 0; p_i < p_i_bound; ++p_i) { idi q1_start = parts_in_q1[p_i] + queue1_start; idi q2_start = parts_in_q2[p_i] + queue2_start; idi q1_end; idi q2_end; if (p_i != num_partitions - 1) { q1_end = parts_in_q1[p_i + 1] + queue1_start; q2_end = parts_in_q2[p_i + 1] + queue1_start; } else { q1_end = queue1_size + queue1_start; q2_end = queue2_size + queue2_start; } std::merge( queue1.begin() + q1_start, queue1.begin() + q1_end, queue2.begin() + q2_start, queue2.begin() + q2_end, tmp_queue.begin() + offsets_queue3[p_i]); } if (elements_count > queue1_size) { tmp_queue.resize(queue1_size); } tmp_queue.swap(queue1); //// Deprecated. Wrong implementation. 
////#pragma omp parallel for // for (idi p_i = 0; p_i < num_partitions; ++p_i) { // idi q1_start = parts_in_q1[p_i] + queue1_start; // idi q2_start = parts_in_q2[p_i] + queue2_start; // idi q1_size; // idi q2_size; // if (p_i != num_partitions - 1) { // q1_size = parts_in_q1[p_i + 1] - parts_in_q1[p_i]; // q2_size = parts_in_q2[p_i + 1] - parts_in_q2[p_i]; // } else { // q1_size = queue1_size - parts_in_q1[p_i]; // q2_size = queue2_size - parts_in_q2[p_i]; // } // if (0 == q1_size || 0 == q2_size) continue; // merge_two_queues_into_1st_queue_seq( // queue1, // q1_start, // q1_size, // queue2, // q2_start, // q2_size); // } return lowest_insert_loc; } // ////////////////////////////////////////// // //// Backup // // Record the lowest insert location. // auto it_loc = std::lower_bound( // queue1.begin() + queue1_start, // queue1.begin() + queue1_start + queue1_size, // queue2[queue2_start]); // idi insert_loc = it_loc - (queue1.begin() + queue1_start); // // auto *queue1_base = queue1.data() + queue1_start; // // Insert the 1st of queue2 // insert_one_element_at( // queue2[queue2_start], // queue1_base, // insert_loc, // queue1_size); // // // Insert // idi q_i_1 = insert_loc + 1; // idi q_i_2 = queue2_start + 1; // const idi q_i_1_bound = queue1_start + queue1_size; // const idi q_i_2_bound = queue2_start + queue2_size; //// const idi insert_i_bound = queue1_start + limit_size; // for (idi insert_i = insert_loc + 1; insert_i < q_i_1_bound; ++insert_i) { // if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) { // // queue1 or queue2 finished traverse. 
Rest o // break; // } else if (queue1[q_i_1] < queue2[q_i_2]) { // ++q_i_1; // } else { // // Insert queue2[q_i_2] into queue1 // insert_one_element_at( // queue2[q_i_2++], // queue1_base, // insert_i, // queue1_size); // ++q_i_1; // } // } // // return insert_loc; } inline void Searching::merge_two_bkt_arrays_into_1st_bkt_array_seq( std::vector< std::vector<Candidate> > &a_array, // const idi a_bound, std::vector< std::vector<Candidate> > &b_array, const idi b_bound) { // idi a_size = a_bucket.size(); // idi b_size = b_bucket.size(); // a_bucket.resize(a_size + b_size); // std::memmove(a_bucket.data() + a_size, b_bucket.data(), b_size * sizeof(b_bucket[0])); for (idi bk_i = 0; bk_i < b_bound; ++bk_i) { // for (idi bk_i = 0; bk_i < num_buckets; ++bk_i) { auto &b_bucket = b_array[bk_i]; if (b_bucket.empty()) continue; auto &a_bucket = a_array[bk_i]; // a_bucket.insert(a_bucket.end(), b_bucket.begin(), b_bucket.end()); // TODO: memory optimization needed. // { idi a_size = a_bucket.size(); idi b_size = b_bucket.size(); a_bucket.resize(a_size + b_size); std::memmove(a_bucket.data() + a_size, b_bucket.data(), b_size * sizeof(b_bucket[0])); // } b_bucket.clear(); } // if (a_bound < b_bound) { // a_bound = b_bound; // } // b_bound = 0; } inline idi Searching::add_into_CandidatesBuckets( const Candidate &cand, std::vector< std::vector<Candidate> > &buckets, const distf bucket_lower, const distf overall_width, const distf bucket_width) { idi b_i; if (cand.distance_ < bucket_lower) { b_i = 0; // buckets[0].push_back(cand); } else { b_i = (cand.distance_ - bucket_lower) / overall_width / bucket_width; // buckets[b_i].push_back(cand); } buckets[b_i].push_back(cand); return b_i; } //inline void Searching::cand_pushes_ngbrs_into_queue( // idi cand_id, // const dataf *query_data, // idi L, // idi &new_k, // boost::dynamic_bitset<> &is_visited, // std::vector<Candidate> &set_L) //{ // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi 
*out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist >= set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } //} //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. 
// while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // Deprecated: cannot use std::set, because its element is constant. 
//inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, //// std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // std::set<Candidate> set_L; // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // set_L.emplace(v_id, dist, false); // } //// std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { //// Candidate &top_cand = set_L[k]; // std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k); // unsigned nk = L; // if (!top_cand->is_checked_) { // top_cand->is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// (remainder of the commented-out std::set-based search_in_sequential draft
//  elided — std::set elements are const, so the approach was abandoned)

// Top-M greedy best-first graph search for one query.
// Each round marks up to M best unchecked candidates of set_L as checked,
// expands all of their out-neighbors, and inserts improving neighbors back
// into the fixed-size sorted queue set_L (size L).  The cursor k restarts
// from the lowest changed position (nk) when an insertion lands above the
// last selected candidate.  The ids of the best K candidates are written
// to set_K.
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);

    {
        // (commented-out code that filled init_ids from ep_'s neighbors and
        //  random vertices elided; init_ids are now prepared by the caller)
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;

    // Prefetch the initial candidates' vertex data.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First dataf slot of a vertex is its norm.
//        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Lowest index of set_L changed this round.
        // Select up to M unchecked candidates, best first.
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push the M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // The adjacency list lives right after the vertex data.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // Worse than the current worst: prune.
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Restart from the lowest insertion if it is above the last selection.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        // (commented-out single-candidate expansion draft elided)
    }

    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    // (commented-out debug print of the top-K results elided)
}

// Batched variant of search_with_top_m: runs `batch_size` queries starting
// at `batch_start` together so that a candidate selected by several queries
// is expanded only once per round (via a shared "joint queue").
inline void Searching::search_with_top_m_in_batch(
        const PANNS::idi M,
        const PANNS::idi batch_start,
        const PANNS::idi batch_size,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector< std::vector<Candidate> > &set_L_list,
        const std::vector<idi> &init_ids,
        std::vector< std::vector<idi> > &set_K_list)
{
    std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));

    // Prepare the init_ids
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            auto &is_visited = is_visited_list[q_i];
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }

    // Initialize set_L_list
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                auto
*v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++; // First dataf slot of a vertex is its norm.
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }

    {
        // Joint queue holding the (de-duplicated) union of all queries'
        // top-M selections for the current round.
        std::vector<idi> joint_queue(M * batch_size);
        idi joint_queue_end = 0;
        boost::dynamic_bitset<> is_in_joint_queue(num_v_);
        // cands_query_ids[cand_id] lists the local query ids that selected cand_id.
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;
        idi counter_for_debug = 0;
        while (!is_finished) {
            ++counter_for_debug;
            // Build the new joint queue: traverse every unfinished query's queue.
            for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
                    // Add candidate cand_id into the joint queue (only once).
                    if (is_in_joint_queue[cand_id]) {
                        continue;
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished
            // Traverse every shared candidate, expanding it once for all
            // queries that selected it.
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                const auto &query_local_ids = cands_query_ids[cand_id];
                // Push cand_id's neighbors into every selecting query's queue.
                for (idi q_local_id : query_local_ids) {
                    dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
//                        ++count_distance_computation;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        if (dist > set_L[L-1].distance_) {
                            continue; // Worse than this query's current worst.
                        }
                        Candidate new_cand(nb_id, dist, false);
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc;
                        }
                    }
                }
                cands_query_ids.erase(cand_id);
            }
            joint_queue_end = 0; // Clear joint_queue
            // Advance every query's cursor; requeue queries that are not done.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    ks[q_local_id] = nks[q_local_id];
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true;
            }
        }
    }

    {
        // Export the top-K ids for every query in the batch.
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
    // (commented-out debug print of the batch results elided)
}

// Tree-reduce all threads' local bucket arrays into global_buckets.
// local_insert_locations[t] is the highest bucket id thread t inserted into
// (used as the per-array merge bound); it is reset to 0 as arrays drain.
// Returns the index of the first non-empty bucket of the final reduced
// local array before it is merged into global_buckets.
inline idi Searching::merge_all_buckets_para(
        std::vector< std::vector< std::vector<Candidate> > > &local_buckets_list,
        std::vector<idi> &local_insert_locations,
        std::vector< std::vector<Candidate> > &global_buckets,
        const idi num_buckets)
{
    idi num_arrays = num_threads_;
    // Reduce over the largest power-of-two prefix of the arrays.
    idi size = 1 << (static_cast<idi>(log2(num_arrays)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (idi i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1;
            idi bi = i + (1 << d) - 1;
            auto &a_array = local_buckets_list[ai];
            auto &b_array = local_buckets_list[bi];
            idi &a_bound = local_insert_locations[ai];
            idi &b_bound = local_insert_locations[bi];
            // (fine-grained per-bucket parallel merge was tried and is slower)
            // (commented-out per-bucket merge variant elided)
            merge_two_bkt_arrays_into_1st_bkt_array_seq(
                    a_array,
//                    a_bound,
                    b_array,
                    num_buckets);
//                    b_bound);
            if (a_bound < b_bound) {
                a_bound = b_bound;
            }
            b_bound = 0;
        }
    }
    // Leftover arrays (when num_threads_ is not a power of two): fold each
    // remaining array onto its predecessor sequentially, prefix-sum style.
    if (size != num_arrays) {
        for (idi a_i = size; a_i < num_arrays; ++a_i) {
            idi ai = a_i;
            idi bi = ai - 1;
            auto &a_array = local_buckets_list[ai];
            auto &b_array = local_buckets_list[bi];
            idi &a_bound = local_insert_locations[ai];
            idi &b_bound = local_insert_locations[bi];
            // (commented-out per-bucket merge variant elided)
            merge_two_bkt_arrays_into_1st_bkt_array_seq(
                    a_array,
//                    a_bound,
                    b_array,
                    num_buckets);
//                    b_bound);
            if (a_bound < b_bound) {
                a_bound = b_bound;
            }
            b_bound = 0;
        }
    }
    // Merge into the global_buckets.
    idi first_bucket = 0;
    auto &b_array = *local_buckets_list.rbegin();
    // Lowest bucket that received anything, taken before the final merge.
    while (first_bucket < num_buckets && b_array[first_bucket].size() == 0) {
        ++first_bucket;
    }
    idi &b_bound = *local_insert_locations.rbegin();
    merge_two_bkt_arrays_into_1st_bkt_array_seq(
            global_buckets,
            b_array,
            num_buckets);
//            b_bound);
    b_bound = 0;
    return first_bucket;
}

// Tree-reduce all threads' local queues (each sorted, with valid length
// local_queues_ends[t]) and merge the final reduced queue into the global
// fixed-size sorted queue set_L (size L).  Returns the lowest index of
// set_L that changed, or L if nothing was merged.  All local queue ends
// are reset to 0 before returning.
inline idi Searching::merge_all_queues_para(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    // Reduce over the largest power-of-two prefix of the queues.
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                // Destination empty: just steal the source queue.
                local_queues_list[ai].swap(local_queues_list[bi]);
                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
                continue;
            }
            // Merge bi into ai through a temporary, capped at length L.
            idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
            std::vector<Candidate> tmp_queue(tmp_length);
            std::merge(
                    local_queues_list[ai].begin(),
                    local_queues_list[ai].begin() + local_queues_ends[ai],
                    local_queues_list[bi].begin(),
                    local_queues_list[bi].begin() + local_queues_ends[bi],
                    tmp_queue.begin());
            if (tmp_length > L) {
                tmp_queue.resize(L);
                tmp_length = L;
            } else if (tmp_length < L) {
                // Keep capacity L so later swaps stay size-compatible.
                tmp_queue.resize(L);
            }
            local_queues_list[ai].swap(tmp_queue);
            local_queues_ends[ai] = tmp_length;
        }
    }
    // Remain, prefix-sum-like merge
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi bi = i - 1;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                local_queues_list[ai].swap(local_queues_list[bi]);
                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
                continue;
            }
            idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
            std::vector<Candidate> tmp_queue(tmp_length);
            std::merge(
                    local_queues_list[ai].begin(),
                    local_queues_list[ai].begin() + local_queues_ends[ai],
                    local_queues_list[bi].begin(),
                    local_queues_list[bi].begin() + local_queues_ends[bi],
                    tmp_queue.begin());
            if (tmp_length > L) {
                tmp_queue.resize(L);
                tmp_length = L;
            } else if (tmp_length < L) {
                tmp_queue.resize(L);
            }
            local_queues_list[ai].swap(tmp_queue);
            local_queues_ends[ai] = tmp_length;
        }
    }
    // Merge into set_L
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq(
                set_L,
                0,
                L,
                local_queues_list[num_threads_ - 1],
                0,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}

//// Using local buckets and then merge.
inline void Searching::para_search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    // (commented-out local-queue variant elided)
    // uint8_t flags (not dynamic_bitset) so threads can CAS them atomically.
    std::vector<uint8_t> is_visited(num_v_, 0);
    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Get the distances of all candidates, store in the set set_L.
distf dist_lower = DISTF_MAX; distf dist_upper = DISTF_MIN; #pragma omp parallel for reduction(max : dist_upper) reduction(min : dist_lower) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. dist_lower = std::min(dist_lower, dist); dist_upper = std::max(dist_upper, dist); // if (dist < dist_lower) { // dist_lower = dist; // } // else if (dist > dist_upper) { //// if (dist > dist_upper) { // dist_upper = dist; // } } // std::sort(set_L.begin(), set_L.begin() + L); // Compute the width for buckets const idi num_buckets = 64; const distf bucket_width = 1.0 / num_buckets; // CandidatesBuckets global_buckets(num_buckets, set_L.begin()->distance_, set_L.rbegin()->distance_); std::vector< std::vector<Candidate> > global_buckets(num_buckets); std::vector< std::vector< std::vector<Candidate> > > local_buckets_list( num_threads_, std::vector< std::vector<Candidate> >(num_buckets)); std::vector<idi> local_insert_locations(num_threads_, 0); // Record a local array's highest bucket id that has new insertion. distf bucket_lower = dist_lower; distf bucket_upper = dist_upper >= 0 ? dist_upper + 1 : dist_upper - 1; // TODO: is it proper? // distf bucket_lower = set_L.begin()->distance_; // distf bucket_upper = set_L.rbegin()->distance_; distf overall_width = bucket_upper - bucket_lower; // Copy set_L into global_bucket. 
for (idi q_i = 0; q_i < L; ++q_i) { // idi b_i = (set_L[q_i].distance_ - bucket_lower) / overall_width / bucket_width; // global_buckets[b_i].push_back(set_L[q_i]); add_into_CandidatesBuckets( set_L[q_i], global_buckets, bucket_lower, overall_width, bucket_width); } std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. idi b_index = 0; // Index of the first bucket that contains unchecked candidates. bool is_finished = false; idi tmp_count = 0; // for debug // while (k < L) { while (!is_finished) { ++tmp_count; // Select M candidates idi last_b = num_buckets; // The last index of the bucket that contains unchecked candidates. Used for updating b_index. for (idi b_i = b_index; b_i < num_buckets && top_m_candidates_end < M; ++b_i) { // Traverse bucket b_i auto &bucket_i = global_buckets[b_i]; idi q_i_bound = bucket_i.size(); for (idi q_i = 0; q_i < q_i_bound && top_m_candidates_end < M; ++q_i) { if (bucket_i[q_i].is_checked_) { continue; } last_b = b_i; bucket_i[q_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = bucket_i[q_i].id_; } } //// Backup // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } //// // Push M candidates' neighbors into the queue. 
#pragma omp parallel for for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; if (!AtomicOps::CAS(is_visited.data() + nb_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { continue; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { if (dist > dist_upper) { continue; } Candidate cand(nb_id, dist, false); // Add to the local buckets. idi r = add_into_CandidatesBuckets( cand, local_buckets_list[tid], bucket_lower, overall_width, bucket_width); if (r > local_insert_locations[tid]) { local_insert_locations[tid] = r; } // idi b_i = (cand.distance_ - bucket_lower) / overall_width / bucket_width; // local_buckets_list[tid][b_i].push_back(cand); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); } } top_m_candidates_end = 0; // Clear top_m_candidates idi lowest_b_i = num_buckets; // The largest index of the bucket that has new candidates // Merge buckets. Merge all local buckets in parallel. 
{ // if (num_threads_ > 1) { lowest_b_i = merge_all_buckets_para( local_buckets_list, local_insert_locations, global_buckets, num_buckets); // } else { // auto &b_array = local_buckets_list[0]; // while (b_array[lowest_b_i].size() == 0) { // ++lowest_b_i; // } // for (idi bk_i = lowest_b_i; bk_i < num_buckets; ++bk_i) { // auto &b_bucket = b_array[bk_i]; // if (b_bucket.empty()) // continue; // auto &a_bucket = global_buckets[bk_i]; // a_bucket.insert(a_bucket.end(), b_bucket.begin(), b_bucket.end()); // } // } } {// Update the start bucket. if (lowest_b_i <= last_b) { b_index = lowest_b_i; } else { b_index = last_b + 1; } // TODO: dynamic buckets width. } {// Check the converge condition if (b_index >= num_buckets) { is_finished = true; } else { idi fixed_count = 0; for (idi b_i = 0; b_i < b_index && fixed_count < L; ++b_i) { fixed_count += global_buckets[b_i].size(); } if (fixed_count >= L) { is_finished = true; } // else { // // Update dist_upper // idi b_i = b_index; // for ( ; b_i < num_buckets && fixed_count < L; ++b_i) { // fixed_count += global_buckets[b_i].size(); // } // distf tmp_upper = b_i * bucket_width * overall_width + bucket_lower; // if (tmp_upper < dist_upper) { // dist_upper = tmp_upper; // } // } } } // {//test // // Print global_buckets' sizes // printf("sizes:"); // for (idi b_i = 0; b_i < num_buckets; ++b_i) { // printf(" %u:%lu", // b_i, global_buckets[b_i].size()); // } // printf("\n"); // } // idi nk = L; // // Merge. Parallel merging in every two queues. // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_para( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // Merge. 
Merge all queues in parallel. // { // if (num_threads_ > 1) { // idi r = merge_all_queues_para( // local_queues_list, // local_queues_ends, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq( // set_L, // 0, // L, // local_queues_list[0], // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } } //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } {// Move to set_K idi count = 0; idi last_b = 0; for ( ; last_b < num_buckets && count < K; ++last_b) { auto &bucket_i = global_buckets[last_b]; count += bucket_i.size(); std::sort(bucket_i.begin(), bucket_i.end()); } idi k_i = 0; for (idi b_i = 0; b_i < last_b && k_i < K; ++b_i) { const auto &bucket_i = global_buckets[b_i]; const idi size_b = bucket_i.size(); for (idi q_i = 0; q_i < size_b && k_i < K; ++q_i) { set_K[k_i++] = bucket_i[q_i].id_; } } } // {//// DEPRECATED. Cannot guarantee the accuracy. 
// idi count = 0; // for (idi b_i = 0; b_i < num_buckets && count < K; ++b_i) { // auto &bucket_i = global_buckets[b_i]; // idi size_b = bucket_i.size(); // if (count + size_b >= K) { // idi remain = K - count; //// std::copy_n(set_L.begin() + count, remain, bucket_i.begin()); // std::memmove(set_L.data() + count, bucket_i.data(), remain * sizeof(Candidate)); // count = K; // } else { //// std::copy_n(set_L.begin() + count, size_b, bucket_i.begin()); // std::memmove(set_L.data() + count, bucket_i.data(), size_b * sizeof(Candidate)); // count += size_b; // } // } // } // // {//test // for (idi k_i = 0; k_i < K; ++k_i) { // printf("query_id: %u " // "set_L[%u]: " // "id: %u " // "dist: %f\n", // query_id, // k_i, // set_L[k_i].id_, // set_L[k_i].distance_); // } // exit(1); // } } ////// Backup: using OpenMP critical clause //inline void Searching::para_search_with_top_m( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // // Get the distances of all candidates, store in the set set_L. 
//#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; //#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // DEPRECATED. No enough workload for OpenMP, and hard to implement efficiently. ///** // * Prepare init_ids and flags, as they are constant for all queries. 
// * @param[out] init_ids // * @param L // */ //inline void Searching::para_prepare_init_ids( // std::vector<unsigned int> &init_ids, // unsigned L) const //{ //// idi num_ngbrs = get_out_degree(ep_); //// edgei edge_start = nsg_graph_indices_[ep_]; //// // Store ep_'s neighbors as candidates //// idi tmp_l = 0; //// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { //// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; //// } //// std::unordered_set<idi> visited_ids; // std::vector<uint8_t> is_selected(num_v_, 0); //// boost::dynamic_bitset<> is_selected(num_v_); // idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // idi init_ids_end = 0; //// idi e_i_bound = out_degree <= L ? out_degree : L; //#pragma omp parallel for // for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { //// for (idi e_i = 0; e_i < e_i_bound; ++e_i) { // idi v_id = out_edges[e_i]; //// if(is_selected[v_id]) { //// continue; //// } //// is_selected[v_id] = 1; // // if (!AtomicOps::CAS(is_selected.data() + v_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // //// init_ids[init_ids_end++] = v_id; // volatile idi old_v = init_ids_end; // volatile idi new_v = old_v + 1; // while (!AtomicOps::CAS(&init_ids_end, old_v, new_v)) { // old_v = init_ids_end; // new_v = old_v + 1; // } // init_ids[old_v] = v_id; // } // //// for (idi i = 0; i < tmp_l; ++i) { //// is_visited[init_ids[i]] = true; //// } // // // If ep_'s neighbors are not enough, add other random vertices // idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). // while (init_ids_end < L) { // tmp_id %= num_v_; // idi v_id = tmp_id++; // if (is_selected[v_id]) { // continue; // } //// if (visited_ids.find(id) != visited_ids.end()) { //// continue; //// } // is_selected[v_id] = 1; //// visited_ids.insert(id); // init_ids[init_ids_end++] = v_id; //// tmp_l++; // } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
LAGraph_matrix_extract_keep_dimensions.c
//------------------------------------------------------------------------------ // LAGraph_matrix_extract_keep_dimensions: extract submatrix but keep the // dimensions of the original matrix // ------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_Matrix_extract_keep_dimensions: Contributed by Gabor Szarnyas. // Budapest University of Technology and Economics // (with accented characters: G\'{a}bor Sz\'{a}rnyas). 
// Compute the #include "LAGraph_internal.h" #define LAGRAPH_FREE_ALL \ { \ LAGRAPH_FREE (C) ; \ LAGRAPH_FREE (type) ; \ } typedef struct { const GrB_Index nv; // number of vertices const bool* Vdense; // array denoting whether a vertex should be kept } Vdense_struct_type; bool select_submatrix_elements_fun(const GrB_Index i, const GrB_Index j, const void *x, const void *thunk) ; bool select_submatrix_elements_fun(const GrB_Index i, const GrB_Index j, const void *x, const void *thunk) { Vdense_struct_type* indices = (Vdense_struct_type*) (thunk); return indices->Vdense[i] && indices->Vdense[j]; } //------------------------------------------------------------------------------ GrB_Info LAGraph_Matrix_extract_keep_dimensions // extract submatrix but keep // the dimensions of the // original matrix ( GrB_Matrix *Chandle, // output matrix const GrB_Matrix A, // input matrix const GrB_Index *Vsparse, // sorted list of vertex indices const bool *Vdense, // boolean array of vertices GrB_Index nv // number of vertex indices ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; GrB_Type type; GrB_Index n ; GrB_Matrix C = NULL ; LAGr_Matrix_type(&type, A) LAGr_Matrix_nrows (&n, A) LAGr_Matrix_new (&C, type, n, n) if (Vsparse == NULL && Vdense == NULL) { LAGRAPH_ERROR("Both Vsparse and Vdense are set to NULL", GrB_NULL_POINTER) } if (Vsparse == NULL) // use Vdense and GxB_select { Vdense_struct_type vdense_struct = {.nv = nv, .Vdense = Vdense}; GrB_Type Vdense_type; LAGr_Type_new(&Vdense_type, sizeof(vdense_struct)) GxB_Scalar vdense_thunk; LAGr_Scalar_new(&vdense_thunk, Vdense_type) LAGr_Scalar_setElement(vdense_thunk, (void*) &vdense_struct) GxB_SelectOp select_submatrix_elements_op; LAGr_SelectOp_new(&select_submatrix_elements_op, select_submatrix_elements_fun, NULL, Vdense_type) LAGr_select(C, NULL, NULL, select_submatrix_elements_op, A, 
vdense_thunk, NULL) LAGRAPH_FREE(select_submatrix_elements_op) LAGRAPH_FREE(vdense_thunk) LAGRAPH_FREE(Vdense_type) } else { GrB_Matrix D; // diagonal matrix used to select rows/columns LAGr_Matrix_new(&D, GrB_BOOL, n, n); bool* X = LAGraph_malloc(nv, sizeof(GrB_BOOL)) ; if (X == NULL) { LAGRAPH_ERROR("out of memory", GrB_OUT_OF_MEMORY) } int nthreads = LAGraph_get_nthreads( ) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (GrB_Index i = 0; i < nv; i++) { X[i] = true; } LAGr_Matrix_build(D, Vsparse, Vsparse, X, nv, GrB_LOR) GxB_Format_Value A_format; LAGRAPH_OK(GxB_get(A, GxB_FORMAT, &A_format)) if (A_format == GxB_BY_ROW) // C = (D*A)*D { LAGr_mxm(C, NULL, NULL, GxB_ANY_SECOND_FP64, D, A, NULL) LAGr_mxm(C, NULL, NULL, GxB_ANY_FIRST_FP64, C, D, NULL) } else // A_format == GxB_BY_COL: C = D*(A*D) { LAGr_mxm(C, NULL, NULL, GxB_ANY_FIRST_FP64, A, D, NULL) LAGr_mxm(C, NULL, NULL, GxB_ANY_SECOND_FP64, D, C, NULL) } LAGRAPH_FREE(D); } (*Chandle) = C ; return (GrB_SUCCESS) ; }
GB_binop__bshift_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bshift_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bshift_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bshift_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bshift_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bshift_uint32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bshift_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bshift_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bshift_uint32)
// C=scalar+B                       GB (_bind1st__bshift_uint32)
// C=scalar+B'                      GB (_bind1st_tran__bshift_uint32)
// C=A+scalar                       GB (_bind2nd__bshift_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__bshift_uint32)

// C type:   uint32_t
// A type:   uint32_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = GB_bitshift_uint32 (aij, bij)
// (bitwise shift: z = x shifted by y bits; y is a signed int8 shift count)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint32 (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT32 || GxB_NO_BSHIFT_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bshift_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t 
*restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bshift_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bshift_uint32) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): this chunk opens inside GB (_AemultB_02__bshift_uint32);
// the directives below finish its non-flipped branch and close the function.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bshift_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numeric work is generated from the shared template
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bshift_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bshift_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    // for bitshift, the second operand (the shift count) is int8, not uint32
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap B
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_uint32 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bshift_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    // scalar shift count is int8
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_uint32 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_bitshift_uint32 (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__bshift_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this file
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_bitshift_uint32 (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__bshift_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
openssl_enc_fmt_plug.c
/* OpenSSL "enc" cracker for JtR. * * This software is Copyright (c) 2013, Dhiru Kholia <dhiru at openwall.com> * * $ openssl enc -aes-256-cbc -p -e -a -salt -in hello.txt -out hello.txt.enc * enter aes-256-cbc encryption password: * Verifying - enter aes-256-cbc encryption password: * salt=305CEDC2A0521011 * key=E08A1E6E1493BD3D3DAA25E112259D1688F7A0302AC8C16208DBDCEF179765F0 * iv =582FDDF9603B9B03A54FC0BB34370DDE * * $ cat hello.txt * 123456789012 * * Input Format: * * $openssl$cipher$md$salt-size$salt$last-chunks$inlined$known-plaintext$plaintext * $openssl$cipher$md$salt-size$salt$last-chunks$0$datalen$data$known-plaintext$plaintext */ #if FMT_EXTERNS_H extern struct fmt_main fmt_openssl; #elif FMT_REGISTERS_H john_register_one(&fmt_openssl); #else #if AC_BUILT #include "autoconfig.h" #endif #ifdef __CYGWIN__ // cygwin has HORRIBLE performance GOMP for this format it runs at 1/#cpu's the speed of OMP_NUM_THREADS=1 or non-GMP build #undef _OPENMP #undef FMT_OMP #undef FMT_OMP_BAD #define FMT_OMP 0 #define FMT_OMP_BAD 0 #endif #include <string.h> #include <errno.h> #if !AC_BUILT || HAVE_FCNTL_H #include <fcntl.h> #endif #include <stdlib.h> #include <stdint.h> #include <sys/types.h> #include "aes.h" #include "md5.h" #include "sha.h" #include "openssl_code.h" #include "arch.h" #include "misc.h" #include "params.h" #include "common.h" #include "formats.h" #include "jumbo.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "openssl-enc" #define FORMAT_NAME "OpenSSL \"enc\" encryption" #define ALGORITHM_NAME "32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 8 #define MAX_KEYS_PER_CRYPT 8 #define PLAINTEXT_LENGTH 125 #define FORMAT_TAG "$openssl$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) static char 
(*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static struct custom_salt { unsigned int saltlen; unsigned char salt[16]; int cipher; int md; int inlined; int kpa; int datalen; unsigned char kpt[256]; unsigned char data[1024]; unsigned char last_chunks[32]; } *cur_salt; static struct fmt_tests tests[] = { {"$openssl$1$0$8$a1a5e529c8d92da5$8de763bf61377d365243993137ad9729$1$0", "password"}, {"$openssl$1$1$8$844527fb2f5d7ad5$ebccb1fcd2b1b30c5c3624d4016978ea$1$0", "password"}, {"$openssl$0$0$8$305cedc2a0521011$bf11609a01e78ec3f50f0cc483e636f9$1$0", "password"}, {"$openssl$0$0$8$305cedc2a0521011$bf11609a01e78ec3f50f0cc483e636f9$1$1$123456", "password"}, {"$openssl$0$0$8$3993671be477e8f0$95384ad4fb11d737dc7ba884ccece94698b46d68d28c5cc4297ce37aea91064e$0$256$9bbbc2af64ba27444370e3b3db6f4077a5b83c099a9b0a13d0c03dbc89185aad078266470bb15c44e7b35aef66f456ba7f44fb0f60824331f5b598347cd471c6745374c7dbecf49a1dd0378e938bb9d3d68703e3038805fb3c7bf0623222bcc8e9375b10853aa7c991ddd086b8e2a97dd9ddd351ee0facde9bc3529742f0ffab990db046f5a64765d7a4b1c83b0290acae3eaa09278933cddcf1fed0ab14d408cd43fb73d830237dcd681425cd878bf4b542c108694b90e82f912c4aa4de02bd002dce975c2bb308aad933bfcfd8375d91837048d110f007ba3852dbb498a54595384ad4fb11d737dc7ba884ccece94698b46d68d28c5cc4297ce37aea91064e$0", "password"}, {"$openssl$0$0$8$3993671be477e8f0$95384ad4fb11d737dc7ba884ccece94698b46d68d28c5cc4297ce37aea91064e$0$256$9bbbc2af64ba27444370e3b3db6f4077a5b83c099a9b0a13d0c03dbc89185aad078266470bb15c44e7b35aef66f456ba7f44fb0f60824331f5b598347cd471c6745374c7dbecf49a1dd0378e938bb9d3d68703e3038805fb3c7bf0623222bcc8e9375b10853aa7c991ddd086b8e2a97dd9ddd351ee0facde9bc3529742f0ffab990db046f5a64765d7a4b1c83b0290acae3eaa09278933cddcf1fed0ab14d408cd43fb73d830237dcd681425cd878bf4b542c108694b90e82f912c4aa4de02bd002dce975c2bb308aad933bfcfd8375d91837048d110f007ba3852dbb498a54595384ad4fb11d737dc7ba884ccece94698b46d68d28c5cc4297ce37aea91064e$1$00000000", "password"}, // natalya.aes-256-cbc 
{"$openssl$0$2$8$8aabc4a37e4b6247$0135d41c5a82a620e3adac2a3d4f1358d1aa6c747811f98bdfb29157d2b39a55$0$240$65fdecc46300f543bdf4607ccc4e9117da5ab3b6978e98226c1283cb48701dbc2e1ac7593718f363dc381f244e7a404c8a7ff581aa93b702bebf55ed1c8a82fb629830d792053a132cbaeb51292b258d38fb349385af592a94acded393dfb75bc21874e65498360d93d031725028a9e9b0f8edcfcd89c2a4e88784a24712895fca4f463e2089ef7db580d7841301c1d63c640fd79e9d6c0ad3b4fc94fe610eb5f29400e883027e0469537e79c3ee1ae2cd3250b825288c4373c45f5ea6f6f1236681c55bcc4f1eb137c221bb3f42a0480135d41c5a82a620e3adac2a3d4f1358d1aa6c747811f98bdfb29157d2b39a55$1$privkey", "knockers"}, {NULL} }; static void init(struct fmt_main *self) { #if defined (_OPENMP) int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } //#define DEBUG_VALID #ifdef DEBUG_VALID // Awesome debug macro for valid() #define return if (printf("\noriginal: %s\n",ciphertext)+printf("fail line %u: '%s' p=%p q=%p q-p-1=%u\n",__LINE__,p,p,q,(unsigned int)(q-p-1)))return #endif static int valid(char *ciphertext, struct fmt_main *self) { char *p = ciphertext, *q = NULL; int len; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; p += TAG_LENGTH; // cipher q = strchr(p, '$'); if (!q) return 0; q = q + 1; if ((q - p - 1) != 1) return 0; if (*p != '0' && *p != '1') return 0; p = q; q = strchr(p, '$'); // md if (!q) return 0; q = q + 1; if ((q - p - 1) != 1) return 0; if (*p != '0' && *p != '1' && *p !='2') return 0; p = q; q = strchr(p, '$'); // salt-size if (!q) return 0; q = q + 1; len = strspn(p, DIGITCHARS); if (len < 1 || len > 2 || len != q - p - 1) return 0; len = atoi(p); if (len < 1 || len > sizeof(cur_salt->salt)) return 0; p = q; q = strchr(p, '$'); // salt if 
(!q) return 0; q = q + 1; if (2 * len != q - p - 1 || 2 * len != strspn(p, HEXCHARS_lc)) return 0; p = q; q = strchr(p, '$'); // last-chunks if (!q) return 0; q = q + 1; len = strspn(p, HEXCHARS_lc); if (len != q - p - 1 || len < 2 || (len & 1) || len/2 > sizeof(cur_salt->last_chunks)) return 0; p = q; q = strchr(p, '$'); // inlined if (!q) return 0; q = q + 1; if ((q - p - 1) != 1) return 0; if (*p != '0' && *p != '1') return 0; if (*p == '0') { p = q; q = strchr(p, '$'); // datalen if (!q) return 0; q = q + 1; len = strspn(p, DIGITCHARS); if (len < 1 || len > 3 || len != q - p - 1) return 0; len = atoi(p); if (len < 1 || len > sizeof(cur_salt->data)) return 0; p = q; q = strchr(p, '$'); // data if (!q) return 0; q = q + 1; if (2 * len != q - p - 1 || 2 * len != strspn(p, HEXCHARS_all)) return 0; } p = q; q = strchr(p, '$'); // known-plaintext if (!q) return !strcmp(p, "0"); if (strlen(q) == 1) return 0; q = q + 1; if ((q - p - 1) != 1) return 0; if (*p != '0' && *p != '1') return 0; if (strlen(q) > sizeof(cur_salt->kpt) - 1) return 0; #ifdef DEBUG_VALID #undef return #endif return 1; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i, res; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "$"); cs.cipher = atoi(p); p = strtokm(NULL, "$"); cs.md = atoi(p); p = strtokm(NULL, "$"); cs.saltlen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.saltlen; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); res = strlen(p) / 2; for (i = 0; i < res; i++) cs.last_chunks[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); cs.inlined = atoi(p); if (cs.inlined) { p = strtokm(NULL, "$"); cs.kpa = atoi(p); if (cs.kpa) { p = strtokm(NULL, "$"); strncpy((char*)cs.kpt, p, 255); } } else { p = strtokm(NULL, "$"); cs.datalen = atoi(p); p = strtokm(NULL, "$"); 
for (i = 0; i < cs.datalen; i++) cs.data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); cs.kpa = atoi(p); if (cs.kpa) { p = strtokm(NULL, "$"); strncpy((char*)cs.kpt, p, 255); } } MEM_FREE(keeptr); return (void *)&cs; } static int kpa(unsigned char *key, unsigned char *iv, int inlined) { AES_KEY akey; unsigned char out[1024]; if (AES_set_decrypt_key(key, 256, &akey) < 0) { fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n"); } if (inlined) { AES_cbc_encrypt(cur_salt->last_chunks, out, 16, &akey, iv, AES_DECRYPT); if (memmem(out, 16, cur_salt->kpt, strlen((char*)cur_salt->kpt))) return 0; } else { AES_cbc_encrypt(cur_salt->data, out, cur_salt->datalen, &akey, iv, AES_DECRYPT); if (memmem(out, cur_salt->datalen, cur_salt->kpt, strlen((char*)cur_salt->kpt))) return 0; } return -1; } static int decrypt(char *password) { unsigned char out[16]; AES_KEY akey; unsigned char iv[16]; unsigned char biv[16]; unsigned char key[32]; int nrounds = 1; // Seems to be fixed as of OpenSSL 1.1.0e (July, 2017) // FIXME handle more stuff switch(cur_salt->cipher) { case 0: switch(cur_salt->md) { case 0: BytesToKey(256, md5, cur_salt->salt, (unsigned char*)password, strlen(password), nrounds, key, iv); AES_set_decrypt_key(key, 256, &akey); break; case 1: BytesToKey(256, sha1, cur_salt->salt, (unsigned char*)password, strlen(password), nrounds, key, iv); AES_set_decrypt_key(key, 256, &akey); break; case 2: BytesToKey(256, sha256, cur_salt->salt, (unsigned char*)password, strlen(password), nrounds, key, iv); AES_set_decrypt_key(key, 256, &akey); break; } break; case 1: switch(cur_salt->md) { case 0: BytesToKey(128, md5, cur_salt->salt, (unsigned char*)password, strlen(password), nrounds, key, iv); AES_set_decrypt_key(key, 128, &akey); break; case 1: BytesToKey(128, sha1, cur_salt->salt, (unsigned char*)password, strlen(password), nrounds, key, iv); AES_set_decrypt_key(key, 128, &akey); break; case 2: BytesToKey(128, sha256, 
cur_salt->salt, (unsigned char*)password, strlen(password), nrounds, key, iv); AES_set_decrypt_key(key, 128, &akey); break; } break; } memcpy(biv, iv, 16); if (cur_salt->inlined) AES_cbc_encrypt(cur_salt->last_chunks, out, 16, &akey, iv, AES_DECRYPT); else { memcpy(iv, cur_salt->last_chunks, 16); AES_cbc_encrypt(cur_salt->last_chunks + 16, out, 16, &akey, iv, AES_DECRYPT); } // now check padding if (check_pkcs_pad(out, 16, 16) < 0) return -1; if (cur_salt->kpa) return kpa(key, biv, cur_salt->inlined); return 0; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { if (decrypt(saved_key[index]) == 0) cracked[index] = 1; else cracked[index] = 0; } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_openssl = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_NOT_EXACT, /* * FIXME: if there wouldn't be so many false positives, * it would be useful to report some tunable costs */ { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { 
fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
pvector.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef PVECTOR_H_
#define PVECTOR_H_

#include <algorithm>
#include <cstdlib>

/*
GAP Benchmark Suite
Class:  pvector
Author: Scott Beamer

Vector class with ability to not initialize or do initialize in parallel
 - std::vector (when resizing) will always initialize, and does it serially
 - When pvector is resized, new elements are uninitialized
 - Resizing is not thread-safe
*/

template <typename T_>
class pvector {
 public:
  typedef T_* iterator;

  pvector() : start_(nullptr), end_size_(nullptr), end_capacity_(nullptr) {}

  // Allocates storage for num_elements; contents are left uninitialized.
  explicit pvector(size_t num_elements) {
    start_ = alloc_raw(num_elements);
    end_size_ = start_ + num_elements;
    end_capacity_ = end_size_;
  }

  pvector(size_t num_elements, T_ init_val) : pvector(num_elements) {
    fill(init_val);
  }

  // Parallel copy of [copy_begin, copy_end) into a fresh buffer.
  pvector(iterator copy_begin, iterator copy_end)
      : pvector(copy_end - copy_begin) {
    #pragma omp parallel for
    for (size_t i=0; i < capacity(); i++)
      start_[i] = copy_begin[i];
  }

  // don't want this to be copied, too much data to move
  pvector(const pvector &other) = delete;

  // prefer move because too much data to copy
  pvector(pvector &&other)
      : start_(other.start_), end_size_(other.end_size_),
        end_capacity_(other.end_capacity_) {
    other.start_ = nullptr;
    other.end_size_ = nullptr;
    other.end_capacity_ = nullptr;
  }

  // want move assignment
  // BUGFIX: the previous version overwrote start_ without freeing the
  // destination's buffer (memory leak) and corrupted itself on self-move.
  pvector& operator= (pvector &&other) {
    if (this != &other) {
      release();
      start_ = other.start_;
      end_size_ = other.end_size_;
      end_capacity_ = other.end_capacity_;
      other.start_ = nullptr;
      other.end_size_ = nullptr;
      other.end_capacity_ = nullptr;
    }
    return *this;
  }

  ~pvector() {
    release();
  }

  // not thread-safe
  void reserve(size_t num_elements) {
    if (num_elements > capacity()) {
      T_ *new_range = alloc_raw(num_elements);
      const size_t old_size = size();
      #pragma omp parallel for
      for (size_t i=0; i < old_size; i++)
        new_range[i] = start_[i];
      end_size_ = new_range + old_size;
      if (start_ != nullptr)
        free(start_);
      start_ = new_range;
      end_capacity_ = start_ + num_elements;
    }
  }

  bool empty() {
    return end_size_ == start_;
  }

  void clear() {
    end_size_ = start_;
  }

  // Grows capacity if needed; new elements beyond old size are uninitialized.
  void resize(size_t num_elements) {
    reserve(num_elements);
    end_size_ = start_ + num_elements;
  }

  T_& operator[](size_t n) {
    return start_[n];
  }

  const T_& operator[](size_t n) const {
    return start_[n];
  }

  void push_back(T_ val) {
    if (size() == capacity()) {
      size_t new_size = capacity() == 0 ? 1 : capacity() * growth_factor;
      reserve(new_size);
    }
    *end_size_ = val;
    end_size_++;
  }

  void fill(T_ init_val) {
    #pragma omp parallel for
    for (T_* ptr=start_; ptr < end_size_; ptr++)
      *ptr = init_val;
  }

  size_t capacity() const {
    return end_capacity_ - start_;
  }

  size_t size() const {
    return end_size_ - start_;
  }

  iterator begin() const {
    return start_;
  }

  iterator end() const {
    return end_size_;
  }

  T_* data() const {
    return start_;
  }

  void swap(pvector &other) {
    std::swap(start_, other.start_);
    std::swap(end_size_, other.end_size_);
    std::swap(end_capacity_, other.end_capacity_);
  }

 private:
  // BUGFIX: aligned_alloc requires the byte count to be an integral multiple
  // of the alignment (C11/C++17, else the behavior is undefined and glibc
  // may return NULL); round the request up to the next multiple of 64.
  static T_* alloc_raw(size_t num_elements) {
    const size_t alignment = 64;
    size_t bytes = sizeof(T_) * num_elements;
    bytes = (bytes + alignment - 1) / alignment * alignment;
    if (bytes == 0)
      bytes = alignment;   // avoid implementation-defined zero-size alloc
    return static_cast<T_*>(aligned_alloc(alignment, bytes));
  }

  // Frees the buffer (allocated with aligned_alloc, so free() is correct).
  void release() {
    if (start_ != nullptr) {
      free(start_);
      start_ = nullptr;
    }
  }

  T_* start_;
  T_* end_size_;
  T_* end_capacity_;
  static const size_t growth_factor = 2;
};

#endif  // PVECTOR_H_
GB_binop__isgt_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_int32)
// A.*B function (eWiseMult):       GB (_AemultB_01__isgt_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_int32)
// A.*B function (eWiseMult):       GB (_AemultB_03__isgt_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_int32)
// A*D function (colscale):         GB (_AxD__isgt_int32)
// D*A function (rowscale):         GB (_DxB__isgt_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_int32)
// C=scalar+B                       GB (_bind1st__isgt_int32)
// C=scalar+B'                      GB (_bind1st_tran__isgt_int32)
// C=A+scalar                       GB (_bind2nd__isgt_int32)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_int32)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (ISGT: "is greater than", result stored as int32 0/1)
#define GB_BINOP(z, x, y, i, j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_INT32 || GxB_NO_ISGT_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (ISGT is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; kept as emitted by the code generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isgt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).

        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap B
        if (!GBB (Bb, p)) continue ;
        int32_t bij = Bx [p] ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = Ax [p] ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = (x > aij) ; \
}

GrB_Info GB (_bind1st_tran__isgt_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (a no-op here since A and B share int32_t)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = (aij > y) ; \
}

GrB_Info GB (_bind2nd_tran__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__trunc_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__trunc_fc64_fc64)
// op(A') function:  GB (_unop_tran__trunc_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_ctrunc (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (complex truncation, applied componentwise)
#define GB_OP(z, x) \
    z = GB_ctrunc (x) ;

// casting (no-op: A and C are both GxB_FC64_t)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_ctrunc (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TRUNC || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__trunc_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: apply the op to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_ctrunc (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_ctrunc (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__trunc_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
serial_tree_learner.h
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/feature_histogram.h>
#include <LightGBM/split_info.h>
#include <LightGBM/data_partition.h>
#include <LightGBM/leaf_splits.h>
#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
// NOTE(review): a using-directive in a header leaks json11 into every
// translation unit that includes this file — consider qualifying `Json`
// as `json11::Json` in the signatures instead. Left as-is here since
// other headers/TUs may rely on it; confirm before changing.
using namespace json11;
namespace LightGBM {
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
 public:
  explicit SerialTreeLearner(const Config* config);
  ~SerialTreeLearner();
  /*! \brief Set up internal state (histogram pool, data partition, leaf splits) for the given dataset */
  void Init(const Dataset* train_data, bool is_constant_hessian) override;
  /*! \brief Swap in a new training dataset, keeping the learner configuration */
  void ResetTrainingData(const Dataset* train_data) override;
  /*! \brief Apply a new config (e.g. changed learning hyper-parameters) */
  void ResetConfig(const Config* config) override;
  /*! \brief Learn one tree from the given gradients/hessians; forced_split_json can force initial splits */
  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian, Json& forced_split_json) override;
  /*! \brief Re-fit leaf outputs of an existing tree structure with new gradients/hessians */
  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred, const score_t* gradients, const score_t* hessians) override;
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }
  /*! \brief Add each leaf's output to the score of every data point routed to that leaf.
  * No-op for stump trees (<= 1 leaf); parallelized over leaves, which is safe because
  * data_partition_ assigns each data index to exactly one leaf. */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    if (tree->num_leaves() <= 1) { return; }
    CHECK(tree->num_leaves() <= data_partition_->num_leaves());
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }
  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, const double* prediction,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, double prediction,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
  /*! \brief Install a split callback; the learner takes ownership (stored in a unique_ptr) */
  void SetSplitCallback(SplitCallback* callback) override {
    split_callback_.reset(callback);
  }
  /*! \brief Install a categorical-split callback; the learner takes ownership (stored in a unique_ptr) */
  void SetCategoricalSplitCallback(CategoricalSplitCallback* callback) override {
    categorical_split_callback_.reset(callback);
  }
 protected:
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();
  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
  virtual void FindBestSplits();
  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
  /* Force splits with forced_split_json dict and then return num splits forced.*/
  virtual int32_t ForceSplits(Tree* tree, Json& forced_split_json, int* left_leaf,
                              int* right_leaf, int* cur_depth,
                              bool *aborted_last_force_split);
  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
  double CalculateOndemandCosts(int feature_index, int leaf_index);
  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used for generate used features */
  Random random_;
  /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
  std::vector<int8_t> is_feature_used_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
#endif
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  int num_threads_;
  std::vector<int> ordered_bin_indices_;
  bool is_constant_hessian_;
  std::vector<bool> feature_used;
  std::vector<uint32_t> feature_used_in_data;
  /*! \brief owned callbacks installed via the setters above */
  std::unique_ptr<SplitCallback> split_callback_;
  std::unique_ptr<CategoricalSplitCallback> categorical_split_callback_;
};

// Negative leaf indices denote "no leaf" and report a count of zero.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
GB_unop__identity_int32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int32_fp32) // op(A') function: GB (_unop_tran__identity_int32_fp32) // C type: int32_t // A type: float // cast: int32_t cij = GB_cast_to_int32_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = GB_cast_to_int32_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = GB_cast_to_int32_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int32_fp32) ( int32_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; int32_t z = GB_cast_to_int32_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; int32_t z = GB_cast_to_int32_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
bml_export_ellsort_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_allocate_ellsort.h"
#include "bml_export_ellsort.h"
#include "bml_types_ellsort.h"

#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Convert a bml matrix into a dense matrix.
 *
 * Scatters the ELL-sorted storage (per-row nonzero counts in A->nnz,
 * column indices in A->index, values in A->value) into a freshly
 * allocated N x N dense array laid out in the requested order.
 * Entries not present in the sparse matrix stay zero (the buffer is
 * zero-initialized by bml_allocate_memory). On an unknown order an
 * error is logged and the (all-zero) buffer is still returned.
 *
 * \ingroup convert_group
 *
 * \param A The bml matrix
 * \param order Requested dense storage order (row- or column-major)
 * \return The dense matrix
 */
void *TYPED_FUNC(
    bml_export_to_dense_ellsort) (
    bml_matrix_ellsort_t * A,
    bml_dense_order_t order)
{
    int N = A->N;
    int M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;
    REAL_T *A_dense = bml_allocate_memory(sizeof(REAL_T) * A->N * A->N);
    REAL_T *A_value = A->value;

    if (order == dense_row_major)
    {
#pragma omp parallel for shared(N, M, A_nnz, A_index, A_value, A_dense)
        for (int row = 0; row < N; row++)
        {
            for (int slot = 0; slot < A_nnz[row]; slot++)
            {
                /* Column of this stored entry in the ELL layout. */
                int col = A_index[ROWMAJOR(row, slot, N, M)];
                A_dense[ROWMAJOR(row, col, N, N)] =
                    A_value[ROWMAJOR(row, slot, N, M)];
            }
        }
    }
    else if (order == dense_column_major)
    {
#pragma omp parallel for shared(N, M, A_nnz, A_index, A_value, A_dense)
        for (int row = 0; row < N; row++)
        {
            for (int slot = 0; slot < A_nnz[row]; slot++)
            {
                int col = A_index[ROWMAJOR(row, slot, N, M)];
                A_dense[COLMAJOR(row, col, N, N)] =
                    A_value[ROWMAJOR(row, slot, N, M)];
            }
        }
    }
    else
    {
        LOG_ERROR("unknown order\n");
    }
    return A_dense;
}
app_baseline.c
/** * @file app.c * @brief Template for a Host Application Source File. * */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <unistd.h> #include <getopt.h> #include <assert.h> #include <stdint.h> #include <omp.h> #include "../../support/timer.h" static int32_t *A; static int32_t *B; static int32_t *C; static int32_t *C2; /** * @brief creates a "test file" by filling a buffer of 64MB with pseudo-random values * @param nr_elements how many 32-bit elements we want the file to be * @return the buffer address */ void *create_test_file(unsigned int nr_elements) { srand(0); printf("nr_elements\t%u\t", nr_elements); A = (uint32_t*) malloc(nr_elements * sizeof(uint32_t)); B = (uint32_t*) malloc(nr_elements * sizeof(uint32_t)); C = (uint32_t*) malloc(nr_elements * sizeof(uint32_t)); for (int i = 0; i < nr_elements; i++) { A[i] = (int) (rand()); B[i] = (int) (rand()); } } /** * @brief compute output in the host */ static void vector_addition_host(unsigned int nr_elements, int t) { omp_set_num_threads(t); #pragma omp parallel for for (int i = 0; i < nr_elements; i++) { C[i] = A[i] + B[i]; } } // Params --------------------------------------------------------------------- typedef struct Params { int input_size; int n_warmup; int n_reps; int n_threads; }Params; void usage() { fprintf(stderr, "\nUsage: ./program [options]" "\n" "\nGeneral options:" "\n -h help" "\n -t <T> # of threads (default=8)" "\n -w <W> # of untimed warmup iterations (default=1)" "\n -e <E> # of timed repetition iterations (default=3)" "\n" "\nBenchmark-specific options:" "\n -i <I> input size (default=8M elements)" "\n"); } struct Params input_params(int argc, char **argv) { struct Params p; p.input_size = 16777216; p.n_warmup = 1; p.n_reps = 3; p.n_threads = 5; int opt; while((opt = getopt(argc, argv, "hi:w:e:t:")) >= 0) { switch(opt) { case 'h': usage(); exit(0); break; case 'i': p.input_size = atoi(optarg); break; case 'w': p.n_warmup = atoi(optarg); break; case 
'e': p.n_reps = atoi(optarg); break; case 't': p.n_threads = atoi(optarg); break; default: fprintf(stderr, "\nUnrecognized option!\n"); usage(); exit(0); } } assert(p.n_threads > 0 && "Invalid # of ranks!"); return p; } /** * @brief Main of the Host Application. */ int main(int argc, char **argv) { struct Params p = input_params(argc, argv); const unsigned int file_size = p.input_size; // Create an input file with arbitrary data. create_test_file(file_size); Timer timer; start(&timer, 0, 0); vector_addition_host(file_size, p.n_threads); stop(&timer, 0); printf("Kernel "); print(&timer, 0, 1); printf("\n"); free(A); free(B); free(C); return 0; }
GB_unaryop__ainv_bool_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_bool // op(A') function: GB_tran__ainv_bool_bool // C type: bool // A type: bool // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_bool ( bool *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Square.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Square.c"
#else

/* Forward pass: output[i] = input[i]^2, element-wise.
 * Uses the generic strided TH_TENSOR_APPLY2 path for 1-D or
 * non-contiguous tensors, and a flat OpenMP-parallel loop over the raw
 * data otherwise. */
void THNN_(Square_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output)
{
  THTensor_(resizeAs)(output, input);

  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
  {
    /* Strided fallback: handles arbitrary layouts element by element. */
    TH_TENSOR_APPLY2(real, output, real, input,
      *output_data = (*input_data) * (*input_data);
    );
  }
  else
  {
    /* Contiguous fast path: iterate over flat storage in parallel. */
    real *output_data = THTensor_(data)(output);
    real *input_data  = THTensor_(data)(input);
    long i;
#pragma omp parallel for private(i)
    for (i = 0; i < THTensor_(nElement)(input); i++)
      output_data[i] = input_data[i]*input_data[i];
  }
}

/* Backward pass: gradInput[i] = 2 * gradOutput[i] * input[i]
 * (derivative of x^2). Same contiguity dispatch as the forward pass. */
void THNN_(Square_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput)
{
  THNN_CHECK_SHAPE(input, gradOutput);
  THTensor_(resizeAs)(gradInput, input);

  if (input->nDimension == 1 ||
      !THTensor_(isContiguous)(input) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    /* Strided fallback. */
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
      *gradInput_data  = 2.0 * (*gradOutput_data) * (*input_data);
    );
  }
  else
  {
    /* Contiguous fast path. */
    real *gradOutput_data = THTensor_(data)(gradOutput);
    real *gradInput_data  = THTensor_(data)(gradInput);
    real *input_data  = THTensor_(data)(input);
    long i;
#pragma omp parallel for private(i)
    for (i = 0; i < THTensor_(nElement)(gradInput); i++)
      gradInput_data[i] = 2.0 * gradOutput_data[i] * input_data[i];
  }
}

#endif
evaluation.c
#include "common.h"

/* Zero both working bitset buffers (A and B), length s words each. */
static void clear_buffers(uint64_t* restrict A, uint64_t* restrict B, const int s)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for(int i=0;i<s;i++)
    A[i] = B[i] = 0;
}

#ifdef _OPENMP
/* One BFS level expansion (parallel variant): scan the current frontier,
 * mark unvisited neighbors with the given level, and collect them into
 * next[]. Returns the size of the new frontier.
 * Each thread gathers into a private local_frontier (a VLA of `nodes`
 * ints — NOTE(review): this lives on each thread's stack, which may be
 * large for big graphs) and merges into next[] under a critical section. */
static int top_down_step(const int level, const int nodes, const int num_frontier,
                         const int max_degree, const int* restrict degree,
                         const int* restrict adjacency, int* restrict frontier,
                         int* restrict next, int* restrict distance, char* restrict bitmap)
{
  int count = 0;
  int local_frontier[nodes];
#pragma omp parallel private(local_frontier)
  {
    int local_count = 0;
#pragma omp for nowait
     for(int i=0;i<num_frontier;i++){
       int v = frontier[i];
       for(int j=0;j<degree[v];j++){
         int n = *(adjacency + v * max_degree + j);  // adjacency[v][j];
         if(bitmap[n] == NOT_VISITED){
           bitmap[n]   = VISITED;
           distance[n] = level;
           local_frontier[local_count++] = n;
         }
       }
     }  // end for i
#pragma omp critical
     {
       /* Append this thread's discoveries to the shared next frontier. */
       memcpy(&next[count], local_frontier, local_count*sizeof(int));
       count += local_count;
     }
  }
  return count;
}
#else
/* Serial variant of the same level expansion: append newly visited
 * neighbors directly to next[]. */
static int top_down_step(const int level, const int nodes, const int num_frontier,
                         const int max_degree, const int* restrict degree,
                         const int* restrict adjacency, int* restrict frontier,
                         int* restrict next, int* restrict distance, char* restrict bitmap)
{
  int count = 0;
  for(int i=0;i<num_frontier;i++){
    int v = frontier[i];
    for(int j=0;j<degree[v];j++){
      int n = *(adjacency + v * max_degree + j);  // int n = adjacency[v][j];
      if(bitmap[n] == NOT_VISITED){
        bitmap[n]   = VISITED;
        distance[n] = level;
        next[count++] = n;
      }
    }
  }
  return count;
}
#endif

/* All-pairs shortest paths via repeated BFS, distributed over MPI ranks:
 * each rank runs BFS from sources s = rank, rank+procs, ... < based_nodes
 * (source index remapped through the height/based_height tiling), then
 * diameter/ASPL are combined with Allreduce. Returns false if any vertex
 * is unreachable (graph disconnected).
 * NOTE(review): sum accumulates (distance+1)*groups per reached vertex —
 * presumably an off-by-one convention shared with matrix_op (which also
 * adds nodes*(nodes-1) at the end); confirm against the caller. */
static bool bfs(const int nodes, const int max_degree, const int* restrict degree,
                const int adjacency[nodes][max_degree], const int based_nodes,
                const int height, const int based_height, const int groups,
                int *diameter, double *ASPL)
{
  char *bitmap  = malloc(sizeof(char) * nodes);
  int *frontier = malloc(sizeof(int)  * nodes);
  int *distance = malloc(sizeof(int)  * nodes);
  int *next     = malloc(sizeof(int)  * nodes);
  bool reached = true;
  double sum = 0.0;
  *diameter = 0;

  for(int s=rank;s<based_nodes;s+=procs){
    /* Remap the base-graph source index into the tiled node numbering. */
    int s1 = (s/based_height) * height + (s%based_height);
    int num_frontier = 1, level = 0;
    for(int i=0;i<nodes;i++)
      bitmap[i] = NOT_VISITED;

    frontier[0]  = s1;
    distance[s1] = level;
    bitmap[s1]   = VISITED;

    while(1){
      num_frontier = top_down_step(level++, nodes, num_frontier, max_degree,
                                   degree, (int *)adjacency, frontier, next, distance, bitmap);
      if(num_frontier == 0) break;

      /* Swap frontier <-> next for the following level. */
      int *tmp = frontier;
      frontier = next;
      next     = tmp;
    }

    *diameter = MAX(*diameter, level-1);

    for(int i=0;i<nodes;i++){
      if(i == s1) continue;
      if(bitmap[i] == NOT_VISITED)
        reached = false;

      sum += (distance[i] + 1) * groups;
    }
  }

  free(bitmap);
  free(frontier);
  free(distance);
  free(next);

  /* Global connectivity check first: bail out before reducing metrics. */
  MPI_Allreduce(MPI_IN_PLACE, &reached, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_WORLD);
  if(!reached)
    return false;

  MPI_Allreduce(MPI_IN_PLACE, diameter, 1, MPI_INT,    MPI_MAX, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, &sum,     1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  *ASPL = sum / ((((double)nodes-1)*nodes));

  return true;
}

/* All-pairs shortest paths via bitset matrix multiplication ("sparse
 * boolean semiring" method): each of `based_nodes` sources owns one bit;
 * repeatedly OR neighbor bitsets until no new vertex is reached. Sources
 * are processed in chunks of UINT64_BITS*chunk bits, chunks distributed
 * round-robin over MPI ranks; rotate_hash remaps vertex ids.
 * Returns false (disconnected) if the iteration count exceeds nodes. */
static bool matrix_op(const int nodes, const int max_degree, const int* restrict degree,
                      const int* restrict adjacency, const int based_nodes, const int height,
                      const int based_height, const int groups, int *diameter, double *ASPL,
                      const int* rotate_hash)
{
  unsigned int elements = (based_nodes+(UINT64_BITS-1))/UINT64_BITS;
  unsigned int chunk = (elements+(procs-1))/procs;
  size_t s = nodes*chunk*sizeof(uint64_t);
  uint64_t* A = malloc(s);  // uint64_t A[nodes][chunk];
  uint64_t* B = malloc(s);  // uint64_t B[nodes][chunk];
  int parsize = (elements+(chunk-1))/chunk;
  double sum = 0.0;
  *diameter = 1;

  for(int t=rank;t<parsize;t+=procs){
    uint64_t kk, l;
    clear_buffers(A, B, nodes*chunk);
    /* Seed: source vertex (t*chunk*64 + l) reaches itself at distance 0. */
    for(l=0; l<UINT64_BITS*chunk && UINT64_BITS*t*chunk+l<based_nodes; l++){
      unsigned int offset = (UINT64_BITS*t*chunk+l)*chunk+l/UINT64_BITS;
      A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS));
    }

    for(kk=0;kk<nodes;kk++){
#ifdef _OPENMP
#pragma omp parallel for
#endif
      /* B = A OR'd along edges: vertex i inherits reachability of its
       * neighbors (ids remapped through rotate_hash). */
      for(int i=0;i<nodes;i++){
        int ii = rotate_hash[i];
        for(int j=0;j<degree[i];j++){
          int n = *(adjacency + i * max_degree + j);  // int n = adjacency[i][j];
          int nn = rotate_hash[n];
          for(int k=0;k<chunk;k++)
            B[ii*chunk+k] |= A[nn*chunk+k];
        }
      }

      uint64_t num = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:num)
#endif
      for(int i=0;i<chunk*nodes;i++)
        num += POPCNT(B[i]);

      /* All l sources reach every vertex: done for this chunk. */
      if(num == (uint64_t)nodes*l) break;

      // swap A <-> B
      uint64_t* tmp = A;
      A = B;
      B = tmp;

      /* Each still-unreached (source, vertex) pair contributes one more
       * hop to the path-length sum. */
      sum += ((double)nodes * l - num) * groups;
    }
    *diameter = MAX(*diameter, kk+1);
  }

  MPI_Allreduce(MPI_IN_PLACE, diameter, 1, MPI_INT,    MPI_MAX, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, &sum,     1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  sum += (double)nodes * (nodes - 1);

  free(A);
  free(B);

  if(*diameter > nodes){
    // PRINT_R0("This graph is not connected graph.\n");
    return false;
  }

  *ASPL = sum / (((double)nodes-1)*nodes);

  return true;
}

/* Entry point: compute diameter and average shortest path length (ASPL)
 * of the graph, timing the computation. Dispatches to BFS-based or
 * bitset-matrix-based APSP according to enable_bfs. Returns false if the
 * graph is disconnected. */
bool evaluation(const int nodes, const int max_degree, const int* restrict degree,
                const int groups, const int* restrict adjacency, const int based_nodes,
                const int height, const int based_height, int *diameter, double *ASPL,
                const bool enable_bfs, const int* rotate_hash)
{
  timer_start(TIMER_APSP);
  bool flag;
  if(enable_bfs)
    flag = bfs(nodes, max_degree, degree, (const int (*)[max_degree])adjacency,
               based_nodes, height, based_height, groups, diameter, ASPL);
  else
    flag = matrix_op(nodes, max_degree, degree, adjacency,
                     based_nodes, height, based_height, groups, diameter, ASPL, rotate_hash);
  timer_stop(TIMER_APSP);

  return flag;
}
sptensor.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "sptensor.h"
#include "matrix.h"
#include "sort.h"
#include "io.h"
#include "timer.h"

#include <math.h>



/******************************************************************************
 * PRIVATE FUNCTONS
 *****************************************************************************/

/* Return nonzero iff nonzeros i and j have identical coordinates in every
 * mode. The 3-mode case is unrolled as a fast path. */
static inline int p_same_coord(
  sptensor_t const * const tt,
  idx_t const i,
  idx_t const j)
{
  idx_t const nmodes = tt->nmodes;
  if(nmodes == 3) {
    return (tt->ind[0][i] == tt->ind[0][j]) &&
           (tt->ind[1][i] == tt->ind[1][j]) &&
           (tt->ind[2][i] == tt->ind[2][j]);
  } else {
    for(idx_t m=0; m < nmodes; ++m) {
      if(tt->ind[m][i] != tt->ind[m][j]) {
        return 0;
      }
    }
    return 1;
  }
}



/******************************************************************************
 * PUBLIC FUNCTONS
 *****************************************************************************/

/* Squared Frobenius norm: sum of squared nonzero values. */
val_t tt_normsq(sptensor_t const * const tt)
{
  val_t norm = 0.0;
  val_t const * const restrict tv = tt->vals;
  for(idx_t n=0; n < tt->nnz; ++n) {
    norm += tv[n] * tv[n];
  }
  return norm;
}


/* Density = nnz / prod(dims), computed as a product of per-mode ratios
 * (nnz^(1/nmodes) / dim_m) to avoid overflowing prod(dims). */
double tt_density(
  sptensor_t const * const tt)
{
  double root = pow((double)tt->nnz, 1./(double)tt->nmodes);
  double density = 1.0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    density *= root / (double)tt->dims[m];
  }
  return density;
}


/* Return a sorted array of the distinct slice indices appearing in mode m;
 * *nunique receives its length. Caller frees with splatt_free (allocated
 * via splatt_malloc). */
idx_t * tt_get_slices(
  sptensor_t const * const tt,
  idx_t const m,
  idx_t * nunique)
{
  /* get maximum number of unique slices */
  idx_t minidx = tt->dims[m];
  idx_t maxidx = 0;

  idx_t const nnz = tt->nnz;
  idx_t const * const inds = tt->ind[m];

  /* find maximum number of uniques */
  for(idx_t n=0; n < nnz; ++n) {
    minidx = SS_MIN(minidx, inds[n]);
    maxidx = SS_MAX(maxidx, inds[n]);
  }
  /* +1 because maxidx is inclusive, not exclusive */
  idx_t const maxrange = 1 + maxidx - minidx;

  /* mark slices which are present and count uniques */
  idx_t * slice_mkrs = calloc(maxrange, sizeof(*slice_mkrs));
  idx_t found = 0;
  for(idx_t n=0; n < nnz; ++n) {
    assert(inds[n] >= minidx);
    idx_t const idx = inds[n] - minidx;
    if(slice_mkrs[idx] == 0) {
      slice_mkrs[idx] = 1;
      ++found;
    }
  }
  *nunique = found;

  /* now copy unique slices */
  idx_t * slices = splatt_malloc(found * sizeof(*slices));
  idx_t ptr = 0;
  for(idx_t i=0; i < maxrange; ++i) {
    if(slice_mkrs[i] == 1) {
      slices[ptr++] = i + minidx;
    }
  }

  free(slice_mkrs);

  return slices;
}


/* Histogram of nonzero counts per index of the given mode
 * (hist[i] = number of nonzeros with ind[mode] == i). Parallel loop with
 * atomic increments since multiple nonzeros may share an index. */
idx_t * tt_get_hist(
  sptensor_t const * const tt,
  idx_t const mode)
{
  idx_t * restrict hist = splatt_malloc(tt->dims[mode] * sizeof(*hist));
  memset(hist, 0, tt->dims[mode] * sizeof(*hist));

  idx_t const * const restrict inds = tt->ind[mode];
  #pragma omp parallel for schedule(static)
  for(idx_t x=0; x < tt->nnz; ++x) {
    #pragma omp atomic
    ++hist[inds[x]];
  }

  return hist;
}


/* Sort the tensor, then collapse runs of nonzeros with identical
 * coordinates by SUMMING their values in place. Returns the number of
 * duplicates removed. */
idx_t tt_remove_dups(
  sptensor_t * const tt)
{
  tt_sort(tt, 0, NULL);

  idx_t const nmodes = tt->nmodes;

  idx_t newnnz = 0;
  for(idx_t nnz = 1; nnz < tt->nnz; ++nnz) {
    /* if the two nnz have the same coordinates, accumulate the value
     * (the values are summed, not averaged) */
    if(p_same_coord(tt, newnnz, nnz)) {
      tt->vals[newnnz] += tt->vals[nnz];
    } else {
      /* new another nnz */
      ++newnnz;
      for(idx_t m=0; m < nmodes; ++m) {
        tt->ind[m][newnnz] = tt->ind[m][nnz];
      }
      tt->vals[newnnz] = tt->vals[nnz];
    }
  }
  ++newnnz;

  idx_t const diff = tt->nnz - newnnz;
  tt->nnz = newnnz;
  return diff;
}


/* Remove empty slices in every mode, compacting indices to a dense range
 * and recording the local->global index mapping in tt->indmap[m]
 * (left NULL for modes that were already dense). Returns the total number
 * of empty slices removed across all modes. */
idx_t tt_remove_empty(
  sptensor_t * const tt)
{
  idx_t dim_sizes[MAX_NMODES];

  idx_t nremoved = 0;

  /* Allocate indmap */
  idx_t const nmodes = tt->nmodes;
  idx_t const nnz = tt->nnz;

  idx_t maxdim = 0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    maxdim = tt->dims[m] > maxdim ? tt->dims[m] : maxdim;
  }
  /* slice counts */
  idx_t * scounts = splatt_malloc(maxdim * sizeof(*scounts));

  for(idx_t m=0; m < nmodes; ++m) {
    dim_sizes[m] = 0;
    memset(scounts, 0, maxdim * sizeof(*scounts));

    /* Fill in indmap */
    for(idx_t n=0; n < tt->nnz; ++n) {
      /* keep track of #unique slices */
      if(scounts[tt->ind[m][n]] == 0) {
        scounts[tt->ind[m][n]] = 1;
        ++dim_sizes[m];
      }
    }

    /* move on if no remapping is necessary */
    if(dim_sizes[m] == tt->dims[m]) {
      tt->indmap[m] = NULL;
      continue;
    }

    nremoved += tt->dims[m] - dim_sizes[m];

    /* Now scan to remove empty slices */
    idx_t ptr = 0;
    for(idx_t i=0; i < tt->dims[m]; ++i) {
      /* reuse the marker array: overwrite each present slice's marker
       * with its new (compacted) index */
      if(scounts[i] == 1) {
        scounts[i] = ptr++;
      }
    }

    tt->indmap[m] = splatt_malloc(dim_sizes[m] * sizeof(**tt->indmap));

    /* relabel all indices in mode m */
    tt->dims[m] = dim_sizes[m];
    for(idx_t n=0; n < tt->nnz; ++n) {
      idx_t const global = tt->ind[m][n];
      idx_t const local = scounts[global];
      assert(local < dim_sizes[m]);
      tt->indmap[m][local] = global; /* store local -> global mapping */
      tt->ind[m][n] = local;
    }
  }

  splatt_free(scounts);
  return nremoved;
}



/******************************************************************************
 * PUBLIC FUNCTONS
 *****************************************************************************/

/* Load a sparse tensor from file (thin wrapper over tt_read_file). */
sptensor_t * tt_read(
  char const * const ifname)
{
  return tt_read_file(ifname);
}


/* Allocate an empty sparse tensor with room for nnz nonzeros in nmodes
 * modes. dims are allocated but NOT initialized; indmap entries start NULL. */
sptensor_t * tt_alloc(
  idx_t const nnz,
  idx_t const nmodes)
{
  sptensor_t * tt = (sptensor_t*) splatt_malloc(sizeof(*tt));
  tt->tiled = SPLATT_NOTILE;

  tt->nnz = nnz;
  tt->vals = splatt_malloc(nnz * sizeof(*tt->vals));

  tt->nmodes = nmodes;
  tt->type = (nmodes == 3) ? SPLATT_3MODE : SPLATT_NMODE;

  tt->dims = splatt_malloc(nmodes * sizeof(*tt->dims));
  tt->ind = splatt_malloc(nmodes * sizeof(*tt->ind));
  for(idx_t m=0; m < nmodes; ++m) {
    tt->ind[m] = splatt_malloc(nnz * sizeof(**tt->ind));
    tt->indmap[m] = NULL;
  }

  return tt;
}


/* Wrap existing index/value arrays in a tensor struct (takes ownership of
 * the pointers — tt_free will release them). Dimensions are inferred as
 * 1 + max index per mode. */
void tt_fill(
  sptensor_t * const tt,
  idx_t const nnz,
  idx_t const nmodes,
  idx_t ** const inds,
  val_t * const vals)
{
  tt->tiled = SPLATT_NOTILE;
  tt->nnz = nnz;
  tt->vals = vals;
  tt->ind = inds;

  tt->nmodes = nmodes;
  tt->type = (nmodes == 3) ? SPLATT_3MODE : SPLATT_NMODE;

  tt->dims = splatt_malloc(nmodes * sizeof(*tt->dims));
  for(idx_t m=0; m < nmodes; ++m) {
    tt->indmap[m] = NULL;

    tt->dims[m] = 1 + inds[m][0];
    for(idx_t i=1; i < nnz; ++i) {
      tt->dims[m] = SS_MAX(tt->dims[m], 1 + inds[m][i]);
    }
  }
}


/* Release all memory owned by the tensor, including the struct itself. */
void tt_free(
  sptensor_t * tt)
{
  tt->nnz = 0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    splatt_free(tt->ind[m]);
    splatt_free(tt->indmap[m]);
  }
  tt->nmodes = 0;
  splatt_free(tt->dims);
  splatt_free(tt->ind);
  splatt_free(tt->vals);
  splatt_free(tt);
}


/* Mode-n matricization: sort the tensor along `mode` and emit a CSR
 * matrix with dims[mode] rows and prod(other dims) columns. Column ids
 * linearize the remaining modes with the last mode varying fastest.
 * NOTE: sorts tt in place as a side effect. */
spmatrix_t * tt_unfold(
  sptensor_t * const tt,
  idx_t const mode)
{
  idx_t nrows = tt->dims[mode];
  idx_t ncols = 1;

  for(idx_t m=1; m < tt->nmodes; ++m) {
    ncols *= tt->dims[(mode + m) % tt->nmodes];
  }

  /* sort tt */
  tt_sort(tt, mode, NULL);

  /* allocate and fill matrix */
  spmatrix_t * mat = spmat_alloc(nrows, ncols, tt->nnz);
  idx_t * const rowptr = mat->rowptr;
  idx_t * const colind = mat->colind;
  val_t * const mvals  = mat->vals;

  /* make sure to skip ahead to the first non-empty slice */
  idx_t row = 0;
  for(idx_t n=0; n < tt->nnz; ++n) {
    /* increment row and account for possibly empty ones */
    while(row <= tt->ind[mode][n]) {
      rowptr[row++] = n;
    }
    mvals[n] = tt->vals[n];

    idx_t col = 0;
    idx_t mult = 1;
    for(idx_t m = 0; m < tt->nmodes; ++m) {
      idx_t const off = tt->nmodes - 1 - m;
      if(off == mode) {
        continue;
      }
      col += tt->ind[off][n] * mult;
      mult *= tt->dims[off];
    }
    colind[n] = col;
  }
  /* account for any empty rows at end, too */
  for(idx_t r=row; r <= nrows; ++r) {
    rowptr[r] = tt->nnz;
  }

  return mat;
}
sstruct_vector.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * Member functions for hypre_SStructVector class.
 *
 *****************************************************************************/

#include "_hypre_sstruct_mv.h"

/*==========================================================================
 * SStructPVector routines
 *==========================================================================*/

/*--------------------------------------------------------------------------
 * Increment the reference count and return an additional handle to 'vector'.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorRef( hypre_SStructPVector  *vector,
                         hypre_SStructPVector **vector_ref )
{
   hypre_SStructPVectorRefCount(vector) ++;
   *vector_ref = vector;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Allocate a part-vector on 'pgrid': one struct vector per variable, plus
 * a (lazily-built) comm pkg slot per variable.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorCreate( MPI_Comm               comm,
                            hypre_SStructPGrid    *pgrid,
                            hypre_SStructPVector **pvector_ptr)
{
   hypre_SStructPVector  *pvector;
   HYPRE_Int              nvars;
   hypre_StructVector   **svectors;
   hypre_CommPkg        **comm_pkgs;
   hypre_StructGrid      *sgrid;
   HYPRE_Int              var;

   pvector = hypre_TAlloc(hypre_SStructPVector, 1);

   hypre_SStructPVectorComm(pvector)  = comm;
   hypre_SStructPVectorPGrid(pvector) = pgrid;
   nvars = hypre_SStructPGridNVars(pgrid);
   hypre_SStructPVectorNVars(pvector) = nvars;

   /* one struct vector per variable, each on that variable's struct grid */
   svectors = hypre_TAlloc(hypre_StructVector *, nvars);
   for (var = 0; var < nvars; var++)
   {
      sgrid = hypre_SStructPGridSGrid(pgrid, var);
      svectors[var] = hypre_StructVectorCreate(comm, sgrid);
   }
   hypre_SStructPVectorSVectors(pvector) = svectors;

   /* comm pkgs start NULL; built on demand in Accumulate */
   comm_pkgs = hypre_TAlloc(hypre_CommPkg *, nvars);
   for (var = 0; var < nvars; var++)
   {
      comm_pkgs[var] = NULL;
   }
   hypre_SStructPVectorCommPkgs(pvector) = comm_pkgs;
   hypre_SStructPVectorRefCount(pvector) = 1;

   /* GEC inclusion of dataindices */
   hypre_SStructPVectorDataIndices(pvector) = NULL ;

   *pvector_ptr = pvector;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Drop a reference; when the count hits zero, destroy the per-variable
 * struct vectors and comm pkgs and free all owned arrays.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorDestroy( hypre_SStructPVector *pvector )
{
   HYPRE_Int            nvars;
   hypre_StructVector **svectors;
   hypre_CommPkg      **comm_pkgs;
   HYPRE_Int            var;

   /* GEC destroying dataindices and data in pvector */

   HYPRE_Int          *dataindices;

   if (pvector)
   {
      hypre_SStructPVectorRefCount(pvector) --;
      if (hypre_SStructPVectorRefCount(pvector) == 0)
      {
         nvars     = hypre_SStructPVectorNVars(pvector);
         svectors  = hypre_SStructPVectorSVectors(pvector);
         comm_pkgs = hypre_SStructPVectorCommPkgs(pvector);
         dataindices = hypre_SStructPVectorDataIndices(pvector);
         for (var = 0; var < nvars; var++)
         {
            hypre_StructVectorDestroy(svectors[var]);
            hypre_CommPkgDestroy(comm_pkgs[var]);
         }

         hypre_TFree(dataindices);
         hypre_TFree(svectors);
         hypre_TFree(comm_pkgs);
         hypre_TFree(pvector);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Initialize each variable's struct vector; non-cell variable types also
 * clear ghost values so cross-processor AddTo accumulation starts from 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorInitialize( hypre_SStructPVector *pvector )
{
   hypre_SStructPGrid    *pgrid    = hypre_SStructPVectorPGrid(pvector);
   HYPRE_Int              nvars    = hypre_SStructPVectorNVars(pvector);
   HYPRE_SStructVariable *vartypes = hypre_SStructPGridVarTypes(pgrid);
   hypre_StructVector    *svector;
   HYPRE_Int              var;

   for (var = 0; var < nvars; var++)
   {
      svector = hypre_SStructPVectorSVector(pvector, var);
      hypre_StructVectorInitialize(svector);
      if (vartypes[var] > 0)
      {
         /* needed to get AddTo accumulation correct between processors */
         hypre_StructVectorClearGhostValues(svector);
      }
   }

   hypre_SStructPVectorAccumulated(pvector) = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Set/add/get one value at 'index' for variable 'var'.
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 * Ghost-zone handling: for AddTo/Get, if the index falls outside every grid
 * box, find the (varoffset-grown) box that contains it and touch the ghost
 * entry there; for Set, clear the matching ghost copies instead.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorSetValues( hypre_SStructPVector *pvector,
                               hypre_Index           index,
                               HYPRE_Int             var,
                               HYPRE_Complex        *value,
                               HYPRE_Int             action )
{
   hypre_StructVector *svector = hypre_SStructPVectorSVector(pvector, var);
   HYPRE_Int           ndim    = hypre_StructVectorNDim(svector);
   hypre_BoxArray     *grid_boxes;
   hypre_Box          *box, *grow_box;
   HYPRE_Int           i;

   /* set values inside the grid */
   hypre_StructVectorSetValues(svector, index, value, action, -1, 0);

   /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
   if (action != 0)
   {
      /* AddTo/Get */
      hypre_SStructPGrid *pgrid = hypre_SStructPVectorPGrid(pvector);
      hypre_Index         varoffset;
      HYPRE_Int           done = 0;

      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(svector));

      /* if the index is inside some real grid box, nothing more to do */
      hypre_ForBoxI(i, grid_boxes)
      {
         box = hypre_BoxArrayBox(grid_boxes, i);
         if (hypre_IndexInBox(index, box))
         {
            done = 1;
            break;
         }
      }

      if (!done)
      {
         grow_box = hypre_BoxCreate(ndim);
         hypre_SStructVariableGetOffset(
            hypre_SStructPGridVarType(pgrid, var), ndim, varoffset);
         hypre_ForBoxI(i, grid_boxes)
         {
            box = hypre_BoxArrayBox(grid_boxes, i);
            hypre_CopyBox(box, grow_box);
            hypre_BoxGrowByIndex(grow_box, varoffset);
            if (hypre_IndexInBox(index, grow_box))
            {
               hypre_StructVectorSetValues(svector, index, value, action, i, 1);
               break;
            }
         }
         hypre_BoxDestroy(grow_box);
      }
   }
   else
   {
      /* Set */
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(svector));
      hypre_ForBoxI(i, grid_boxes)
      {
         box = hypre_BoxArrayBox(grid_boxes, i);
         if (!hypre_IndexInBox(index, box))
         {
            hypre_StructVectorClearValues(svector, index, i, 1);
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Box version of SetValues.
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 * For AddTo/Get, the part of the box outside the grid is routed into ghost
 * zones via box subtraction bookkeeping (left_boxes/done_boxes); for Set,
 * the out-of-grid pieces are cleared per grid box.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorSetBoxValues( hypre_SStructPVector *pvector,
                                  hypre_Index           ilower,
                                  hypre_Index           iupper,
                                  HYPRE_Int             var,
                                  HYPRE_Complex        *values,
                                  HYPRE_Int             action )
{
   hypre_StructVector *svector = hypre_SStructPVectorSVector(pvector, var);
   HYPRE_Int           ndim    = hypre_StructVectorNDim(svector);
   hypre_BoxArray     *grid_boxes;
   hypre_Box          *box;
   hypre_Box          *value_box;
   HYPRE_Int           i, j;

   box = hypre_BoxCreate(ndim);
   hypre_CopyIndex(ilower, hypre_BoxIMin(box));
   hypre_CopyIndex(iupper, hypre_BoxIMax(box));
   value_box = box;

   /* set values inside the grid */
   hypre_StructVectorSetBoxValues(svector, box, value_box, values, action, -1, 0);

   /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
   if (action != 0)
   {
      /* AddTo/Get */
      hypre_SStructPGrid *pgrid = hypre_SStructPVectorPGrid(pvector);
      hypre_Index         varoffset;
      hypre_BoxArray     *left_boxes, *done_boxes, *temp_boxes;
      hypre_Box          *left_box, *done_box, *int_box;

      hypre_SStructVariableGetOffset(
         hypre_SStructPGridVarType(pgrid, var), ndim, varoffset);
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(svector));

      left_boxes = hypre_BoxArrayCreate(1, ndim);
      done_boxes = hypre_BoxArrayCreate(2, ndim);
      temp_boxes = hypre_BoxArrayCreate(0, ndim);

      /* done_box always points to the first box in done_boxes */
      done_box = hypre_BoxArrayBox(done_boxes, 0);
      /* int_box always points to the second box in done_boxes */
      int_box = hypre_BoxArrayBox(done_boxes, 1);

      /* left_boxes = the requested box minus the real grid boxes */
      hypre_CopyBox(box, hypre_BoxArrayBox(left_boxes, 0));
      hypre_BoxArraySetSize(left_boxes, 1);
      hypre_SubtractBoxArrays(left_boxes, grid_boxes, temp_boxes);

      hypre_BoxArraySetSize(done_boxes, 0);
      hypre_ForBoxI(i, grid_boxes)
      {
         /* remove the previously-handled (grown) grid box from left_boxes */
         hypre_SubtractBoxArrays(left_boxes, done_boxes, temp_boxes);
         hypre_BoxArraySetSize(done_boxes, 1);
         hypre_CopyBox(hypre_BoxArrayBox(grid_boxes, i), done_box);
         hypre_BoxGrowByIndex(done_box, varoffset);
         hypre_ForBoxI(j, left_boxes)
         {
            left_box = hypre_BoxArrayBox(left_boxes, j);
            hypre_IntersectBoxes(left_box, done_box, int_box);
            hypre_StructVectorSetBoxValues(svector, int_box, value_box,
                                           values, action, i, 1);
         }
      }

      hypre_BoxArrayDestroy(left_boxes);
      hypre_BoxArrayDestroy(done_boxes);
      hypre_BoxArrayDestroy(temp_boxes);
   }
   else
   {
      /* Set */
      hypre_BoxArray *diff_boxes;
      hypre_Box      *grid_box, *diff_box;

      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(svector));
      diff_boxes = hypre_BoxArrayCreate(0, ndim);

      hypre_ForBoxI(i, grid_boxes)
      {
         grid_box = hypre_BoxArrayBox(grid_boxes, i);
         hypre_BoxArraySetSize(diff_boxes, 0);
         hypre_SubtractBoxes(box, grid_box, diff_boxes);
         hypre_ForBoxI(j, diff_boxes)
         {
            diff_box = hypre_BoxArrayBox(diff_boxes, j);
            hypre_StructVectorClearBoxValues(svector, diff_box, i, 1);
         }
      }
      hypre_BoxArrayDestroy(diff_boxes);
   }

   hypre_BoxDestroy(box);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Communicate ghost-zone AddTo contributions between processors for all
 * non-cell variables.  Also (re)builds and stores comm_pkgs[var] for later
 * use by Gather.  Idempotent: returns immediately if already accumulated.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorAccumulate( hypre_SStructPVector *pvector )
{
   hypre_SStructPGrid    *pgrid     = hypre_SStructPVectorPGrid(pvector);
   HYPRE_Int              nvars     = hypre_SStructPVectorNVars(pvector);
   hypre_StructVector   **svectors  = hypre_SStructPVectorSVectors(pvector);
   hypre_CommPkg        **comm_pkgs = hypre_SStructPVectorCommPkgs(pvector);

   hypre_CommInfo        *comm_info;
   hypre_CommPkg         *comm_pkg;
   hypre_CommHandle      *comm_handle;

   HYPRE_Int              ndim      = hypre_SStructPGridNDim(pgrid);
   HYPRE_SStructVariable *vartypes  = hypre_SStructPGridVarTypes(pgrid);
   hypre_Index            varoffset;
   HYPRE_Int              num_ghost[2*HYPRE_MAXDIM];
   hypre_StructGrid      *sgrid;
   HYPRE_Int              var, d;

   /* if values already accumulated, just return */
   if (hypre_SStructPVectorAccumulated(pvector))
   {
      return hypre_error_flag;
   }

   for (var = 0; var < nvars; var++)
   {
      if (vartypes[var] > 0)
      {
         sgrid = hypre_StructVectorGrid(svectors[var]);
         /* ghost width in each direction = variable offset */
         hypre_SStructVariableGetOffset(vartypes[var], ndim, varoffset);
         for (d = 0; d < ndim; d++)
         {
            num_ghost[2*d] = num_ghost[2*d+1] = hypre_IndexD(varoffset, d);
         }

         hypre_CreateCommInfoFromNumGhost(sgrid, num_ghost, &comm_info);
         hypre_CommPkgDestroy(comm_pkgs[var]);
         /* pkg stored for later Gather */
         hypre_CommPkgCreate(comm_info,
                             hypre_StructVectorDataSpace(svectors[var]),
                             hypre_StructVectorDataSpace(svectors[var]),
                             1, NULL, 0,
                             hypre_StructVectorComm(svectors[var]),
                             &comm_pkgs[var]);

         /* accumulate values from AddTo */
         hypre_CommPkgCreate(comm_info,
                             hypre_StructVectorDataSpace(svectors[var]),
                             hypre_StructVectorDataSpace(svectors[var]),
                             1, NULL, 1,
                             hypre_StructVectorComm(svectors[var]),
                             &comm_pkg);
         hypre_InitializeCommunication(comm_pkg,
                                       hypre_StructVectorData(svectors[var]),
                                       hypre_StructVectorData(svectors[var]),
                                       1, 0, &comm_handle);
         hypre_FinalizeCommunication(comm_handle);

         hypre_CommInfoDestroy(comm_info);
         hypre_CommPkgDestroy(comm_pkg);
      }
   }

   hypre_SStructPVectorAccumulated(pvector) = 1;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Finish assembly: accumulate AddTo contributions, then clear ghost values
 * and assemble each variable's struct vector.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorAssemble( hypre_SStructPVector *pvector )
{
   HYPRE_Int            nvars    = hypre_SStructPVectorNVars(pvector);
   hypre_StructVector **svectors = hypre_SStructPVectorSVectors(pvector);
   HYPRE_Int            var;

   hypre_SStructPVectorAccumulate(pvector);

   for (var = 0; var < nvars; var++)
   {
      hypre_StructVectorClearGhostValues(svectors[var]);
      hypre_StructVectorAssemble(svectors[var]);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Fill ghost zones from neighboring processors using the comm pkgs built
 * in Accumulate (skips variables with no pkg).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorGather( hypre_SStructPVector *pvector )
{
   HYPRE_Int            nvars     = hypre_SStructPVectorNVars(pvector);
   hypre_StructVector **svectors  = hypre_SStructPVectorSVectors(pvector);
   hypre_CommPkg      **comm_pkgs = hypre_SStructPVectorCommPkgs(pvector);
   hypre_CommHandle    *comm_handle;
   HYPRE_Int            var;

   for (var = 0; var < nvars; var++)
   {
      if (comm_pkgs[var] != NULL)
      {
         hypre_InitializeCommunication(comm_pkgs[var],
                                       hypre_StructVectorData(svectors[var]),
                                       hypre_StructVectorData(svectors[var]),
                                       0, 0, &comm_handle);
         hypre_FinalizeCommunication(comm_handle);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Get one value, allowing boundary indices: the struct grid's boxes are
 * temporarily swapped for the pgrid's interface boxes during the read.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorGetValues( hypre_SStructPVector *pvector,
                               hypre_Index           index,
                               HYPRE_Int             var,
                               HYPRE_Complex        *value )
{
   hypre_SStructPGrid *pgrid     = hypre_SStructPVectorPGrid(pvector);
   hypre_StructVector *svector   = hypre_SStructPVectorSVector(pvector, var);
   hypre_StructGrid   *sgrid     = hypre_StructVectorGrid(svector);
   hypre_BoxArray     *iboxarray = hypre_SStructPGridIBoxArray(pgrid, var);
   hypre_BoxArray     *tboxarray;

   /* temporarily swap out sgrid boxes in order to get boundary data */
   tboxarray = hypre_StructGridBoxes(sgrid);
   hypre_StructGridBoxes(sgrid) = iboxarray;
   hypre_StructVectorSetValues(svector, index, value, -1, -1, 0);
   hypre_StructGridBoxes(sgrid) = tboxarray;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Box version of GetValues (same temporary box-array swap trick).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorGetBoxValues( hypre_SStructPVector *pvector,
                                  hypre_Index           ilower,
                                  hypre_Index           iupper,
                                  HYPRE_Int             var,
                                  HYPRE_Complex        *values )
{
   hypre_SStructPGrid *pgrid     = hypre_SStructPVectorPGrid(pvector);
   hypre_StructVector *svector   = hypre_SStructPVectorSVector(pvector, var);
   hypre_StructGrid   *sgrid     = hypre_StructVectorGrid(svector);
   hypre_BoxArray     *iboxarray = hypre_SStructPGridIBoxArray(pgrid, var);
   hypre_BoxArray     *tboxarray;
   hypre_Box          *box;

   box = hypre_BoxCreate(hypre_StructVectorNDim(svector));
   hypre_CopyIndex(ilower, hypre_BoxIMin(box));
   hypre_CopyIndex(iupper, hypre_BoxIMax(box));

   /* temporarily swap out sgrid boxes in order to get boundary data */
   tboxarray = hypre_StructGridBoxes(sgrid);
   hypre_StructGridBoxes(sgrid) = iboxarray;
   hypre_StructVectorSetBoxValues(svector, box, box, values, -1, -1, 0);
   hypre_StructGridBoxes(sgrid) = tboxarray;

   hypre_BoxDestroy(box);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Set every entry of every variable's struct vector to 'value'.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorSetConstantValues( hypre_SStructPVector *pvector,
                                       HYPRE_Complex         value )
{
   HYPRE_Int           nvars = hypre_SStructPVectorNVars(pvector);
   hypre_StructVector *svector;
   HYPRE_Int           var;

   for (var = 0; var < nvars; var++)
   {
      svector = hypre_SStructPVectorSVector(pvector, var);
      hypre_StructVectorSetConstantValues(svector, value);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * For now, just print multiple files ("<filename>.<var>")
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorPrint( const char           *filename,
                           hypre_SStructPVector *pvector,
                           HYPRE_Int             all )
{
   HYPRE_Int  nvars = hypre_SStructPVectorNVars(pvector);
   HYPRE_Int  var;
   char       new_filename[255];

   for (var = 0; var < nvars; var++)
   {
      hypre_sprintf(new_filename, "%s.%02d", filename, var);
      hypre_StructVectorPrint(new_filename,
                              hypre_SStructPVectorSVector(pvector, var),
                              all);
   }

   return hypre_error_flag;
}

/*==========================================================================
 * SStructVector routines
 *==========================================================================*/

/*--------------------------------------------------------------------------
 * Increment the reference count and return an additional handle to 'vector'.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructVectorRef( hypre_SStructVector  *vector,
                        hypre_SStructVector **vector_ref )
{
   hypre_SStructVectorRefCount(vector) ++;
   *vector_ref = vector;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Set every entry of every part-vector to 'value'.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructVectorSetConstantValues( hypre_SStructVector *vector,
                                      HYPRE_Complex        value )
{
   HYPRE_Int             nparts = hypre_SStructVectorNParts(vector);
   hypre_SStructPVector *pvector;
   HYPRE_Int             part;

   for (part = 0; part < nparts; part++)
   {
      pvector = hypre_SStructVectorPVector(vector, part);
      hypre_SStructPVectorSetConstantValues(pvector, value);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Here the address of the parvector inside the semistructured vector
 * is provided to the "outside". It assumes that the vector type
 * is HYPRE_SSTRUCT
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructVectorConvert( hypre_SStructVector  *vector,
                            hypre_ParVector     **parvector_ptr )
{
   *parvector_ptr = hypre_SStructVectorParVector(vector);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Copy values from vector to parvector and provide the address
 * (box-by-box copy of each struct vector's data into the ParVector's
 * contiguous local data array).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructVectorParConvert( hypre_SStructVector  *vector,
                               hypre_ParVector     **parvector_ptr )
{
   hypre_ParVector      *parvector;
   HYPRE_Complex        *pardata;
   HYPRE_Int             pari;

   hypre_SStructPVector *pvector;
   hypre_StructVector   *y;
   hypre_Box            *y_data_box;
   HYPRE_Int             yi;
   HYPRE_Complex        *yp;
   hypre_BoxArray       *boxes;
   hypre_Box            *box;
   HYPRE_Int             bi;
   hypre_Index           loop_size;
   hypre_IndexRef        start;
   hypre_Index           stride;
   HYPRE_Int             nparts, nvars;
   HYPRE_Int             part, var, i;

   hypre_SetIndex(stride, 1);

   parvector = hypre_SStructVectorParVector(vector);
   pardata = hypre_VectorData(hypre_ParVectorLocalVector(parvector));
   pari = 0;
   nparts = hypre_SStructVectorNParts(vector);
   for (part = 0; part < nparts; part++)
   {
      pvector = hypre_SStructVectorPVector(vector, part);
      nvars = hypre_SStructPVectorNVars(pvector);
      for (var = 0; var < nvars; var++)
      {
         y = hypre_SStructPVectorSVector(pvector, var);

         boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y));
         hypre_ForBoxI(i, boxes)
         {
            box   = hypre_BoxArrayBox(boxes, i);
            start = hypre_BoxIMin(box);

            y_data_box =
               hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);
            yp = hypre_StructVectorBoxData(y, i);

            hypre_BoxGetSize(box, loop_size);
            hypre_BoxLoop2Begin(hypre_SStructVectorNDim(vector), loop_size,
                                y_data_box, start, stride, yi,
                                box,        start, stride, bi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,bi) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(yi, bi)
            {
               pardata[pari+bi] = yp[yi];
            }
            hypre_BoxLoop2End(yi, bi);
            pari += hypre_BoxVolume(box);
         }
      }
   }

   *parvector_ptr = hypre_SStructVectorParVector(vector);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Used for HYPRE_SSTRUCT type semi structured vectors.
 * A dummy function to indicate that the struct vector part will be used.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructVectorRestore( hypre_SStructVector *vector,
                            hypre_ParVector     *parvector )
{
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Copy values from parvector to vector (inverse of ParConvert; note that
 * the 'parvector' argument is only used as a NULL/non-NULL flag -- the
 * actual source is the vector's own ParVector).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SStructVectorParRestore( hypre_SStructVector *vector,
                               hypre_ParVector     *parvector )
{
   HYPRE_Complex        *pardata;
   HYPRE_Int             pari;

   hypre_SStructPVector *pvector;
   hypre_StructVector   *y;
   hypre_Box            *y_data_box;
   HYPRE_Int             yi;
   HYPRE_Complex        *yp;
   hypre_BoxArray       *boxes;
   hypre_Box            *box;
   HYPRE_Int             bi;
   hypre_Index           loop_size;
   hypre_IndexRef        start;
   hypre_Index           stride;
   HYPRE_Int             nparts, nvars;
   HYPRE_Int             part, var, i;

   if (parvector != NULL)
   {
      hypre_SetIndex(stride, 1);

      parvector = hypre_SStructVectorParVector(vector);
      pardata = hypre_VectorData(hypre_ParVectorLocalVector(parvector));
      pari = 0;
      nparts = hypre_SStructVectorNParts(vector);
      for (part = 0; part < nparts; part++)
      {
         pvector = hypre_SStructVectorPVector(vector, part);
         nvars = hypre_SStructPVectorNVars(pvector);
         for (var = 0; var < nvars; var++)
         {
            y = hypre_SStructPVectorSVector(pvector, var);

            boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y));
            hypre_ForBoxI(i, boxes)
            {
               box   = hypre_BoxArrayBox(boxes, i);
               start = hypre_BoxIMin(box);

               y_data_box =
                  hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);
               yp = hypre_StructVectorBoxData(y, i);

               hypre_BoxGetSize(box, loop_size);
               hypre_BoxLoop2Begin(hypre_SStructVectorNDim(vector), loop_size,
                                   y_data_box, start, stride, yi,
                                   box,        start, stride, bi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi,bi) HYPRE_SMP_SCHEDULE
#endif
               hypre_BoxLoop2For(yi, bi)
               {
                  yp[yi] = pardata[pari+bi];
               }
               hypre_BoxLoop2End(yi, bi);
               pari += hypre_BoxVolume(box);
            }
         }
      }
   }

   return hypre_error_flag;
}

/*------------------------------------------------------------------
 *  GEC1002 shell initialization of a pvector
 *  if the pvector exists. This function will set the dataindices
 *  and datasize of the pvector. Datasize is the sum of the sizes
 *  of each svector and dataindices is defined as
 *  dataindices[var]= aggregated initial size of the pvector[var]
 *  When ucvars are present we need to modify adding nucvars.
 *----------------------------------------------------------------*/

HYPRE_Int
hypre_SStructPVectorInitializeShell( hypre_SStructPVector *pvector)
{
   HYPRE_Int            nvars = hypre_SStructPVectorNVars(pvector);
   HYPRE_Int            var;
   HYPRE_Int            pdatasize;
   HYPRE_Int            svectdatasize;
   HYPRE_Int           *pdataindices;
   HYPRE_Int            nucvars = 0;
   hypre_StructVector  *svector;

   pdatasize = 0;
   pdataindices = hypre_CTAlloc(HYPRE_Int, nvars);

   for (var =0; var < nvars; var++)
   {
      svector = hypre_SStructPVectorSVector(pvector, var);
      hypre_StructVectorInitializeShell(svector);
      pdataindices[var] = pdatasize ;
      svectdatasize = hypre_StructVectorDataSize(svector);
      pdatasize += svectdatasize;
   }

   /* GEC1002 assuming that the ucvars are located at the end, after the
    * the size of the vars has been included we add the number of uvar
    * for this part */

   hypre_SStructPVectorDataIndices(pvector) = pdataindices;
   hypre_SStructPVectorDataSize(pvector) = pdatasize+nucvars ;

   hypre_SStructPVectorAccumulated(pvector) = 0;

   return hypre_error_flag;
}

/*------------------------------------------------------------------
 *  GEC1002 shell initialization of a sstructvector
 *  if the vector exists. This function will set the
 *  dataindices and datasize of the vector. When ucvars
 *  are present at the end of all the parts we need to modify adding pieces
 *  for ucvars.
 *----------------------------------------------------------------*/

HYPRE_Int
hypre_SStructVectorInitializeShell( hypre_SStructVector *vector)
{
   HYPRE_Int             part  ;
   HYPRE_Int             datasize;
   HYPRE_Int             pdatasize;
   HYPRE_Int             nparts = hypre_SStructVectorNParts(vector);
   hypre_SStructPVector *pvector;
   HYPRE_Int            *dataindices;

   datasize = 0;
   dataindices = hypre_CTAlloc(HYPRE_Int, nparts);
   for (part = 0; part < nparts; part++)
   {
      pvector = hypre_SStructVectorPVector(vector, part) ;
      hypre_SStructPVectorInitializeShell(pvector);
      pdatasize = hypre_SStructPVectorDataSize(pvector);
      dataindices[part] = datasize ;
      datasize        += pdatasize ;
   }
   hypre_SStructVectorDataIndices(vector) = dataindices;
   hypre_SStructVectorDataSize(vector) = datasize ;

   return hypre_error_flag;
}

/*------------------------------------------------------------------
 * Clear ghost values of every struct vector in every part.
 *----------------------------------------------------------------*/

HYPRE_Int
hypre_SStructVectorClearGhostValues(hypre_SStructVector *vector)
{
   HYPRE_Int             nparts= hypre_SStructVectorNParts(vector);
   hypre_SStructPVector *pvector;
   hypre_StructVector   *svector;
   HYPRE_Int             part;
   HYPRE_Int             nvars, var;

   for (part= 0; part< nparts; part++)
   {
      pvector= hypre_SStructVectorPVector(vector, part);
      nvars  = hypre_SStructPVectorNVars(pvector);

      for (var= 0; var< nvars; var++)
      {
         svector= hypre_SStructPVectorSVector(pvector, var);
         hypre_StructVectorClearGhostValues(svector);
      }
   }

   return hypre_error_flag;
}
sevnlog.h
//
// Created by Giuliano on 29/11/19.
// Header to store logging functions and definitions (mostly for debug)
//

#ifndef SEVN_SEVNLOG_H
#define SEVN_SEVNLOG_H

#include <string>
#include <sstream>
#include <omp.h>
#include <iostream>
#include "errhand.h"

//TODO Remove this and correctly use SevnLogging
//GI291119: Define the DEBUG_LOG functions to print the Debug message only if enabled in the compilation
#ifdef DEBUG
#define DEBUG_LOG(str) do { std::cout << "DEBUG: FILE::" << __FILE__ << " LINE::" <<__LINE__ << std::endl << " -> " << str << " <- " << std::endl; } while (false)
#else
#define DEBUG_LOG(str) do { } while (false)
#endif

//TODO Add the possibility to directly flush the output in some log file(s).
//TODO It is really thread safe?
namespace sevnstd{

    class sevnerr;

    /*! A thread safe(really?) Logging class to handle the message output and the error. It is
     * based on a log level scheme. There is a static attribute log_level that has
     * some integer value by default (20, or 10 if DEBUG has been enabled).
     * Then a given message is sent in output only if its level is larger than log_level.
     * A new log_level can be set with the method set_level.
     * The possible logging message are debug(lvl 10), info(lvl 20), warning(lvl 30),
     * error(lvl 40), critical (no level always printed). Critical raises automatically an exception, while
     * in error is optional. A general log method can be used to print output with a custom level.
     * */
    class SevnLogging {

    public:

        /**
         * Default class constructor.
         */
        SevnLogging() {}

        //TODO Is ok for a single instance to modify the log_level. Is maybe better to modify only a local log_level and the use this in the various log function?
        /**
         * Class constructor that set the static log level attribute.
         * @param level log level to set.
         */
        SevnLogging(int level) { set_level(level); }

        /**
         * Default class destructor.
         */
        ~SevnLogging() {}

        /**
         * Logs a message to std::cout with integer level on this logger.
         * @param level message level.
         * @param errstate message to log.
         * @param file_input Null or __FILE__. if __FILE__ is used the message logs the name of the file where the log is called.
         * @param line_input Null or __LINE__. if __LINE__ is used the message logs the row number in the file where the log is called.
         * @param stop if != 0, throw a runtime exception.
         */
        void log(int level, std::string errstate, const char *file_input = nullptr, int line_input = -1, int stop = 0) const;

        /**
         * Logs a message to std::cout with level DEBUG (lvl 10) on this logger.
         * @param errstate message to log.
         * @param file_input Null or __FILE__. if __FILE__ is used the message logs the name of the file where the log is called.
         * @param line_input Null or __LINE__. if __LINE__ is used the message logs the row number in the file where the log is called.
         */
        void debug(std::string errstate, const char *file_input = nullptr, int line_input = -1) const;

        /**
         * Logs a message to std::cout with level INFO (lvl 20) on this logger.
         * @param errstate message to log.
         * @param file_input Null or __FILE__. if __FILE__ is used the message logs the name of the file where the log is called.
         * @param line_input Null or __LINE__. if __LINE__ is used the message logs the row number in the file where the log is called.
         */
        void info(std::string errstate, const char *file_input = nullptr, int line_input = -1) const;

        /**
         * Logs a message with std::cerr level WARNING (lvl 30) on this logger.
         * @param errstate message to log.
         * @param file_input Null or __FILE__. if __FILE__ is used the message logs the name of the file where the log is called.
         * @param line_input Null or __LINE__. if __LINE__ is used the message logs the row number in the file where the log is called.
         */
        void warning(std::string errstate, const char *file_input = nullptr, int line_input = -1) const;

        /**
         * Logs a message with std::cerr level ERROR (lvl 40) on this logger and throw an exception.
         * @param errstate message to log.
         * @param file_input Null or __FILE__. if __FILE__ is used the message logs the name of the file where the log is called.
         * @param line_input Null or __LINE__. if __LINE__ is used the message logs the row number in the file where the log is called.
         * @param stop if true throw a sevnerr exception.
         */
        void error(std::string errstate, const char *file_input = nullptr, int line_input = -1, bool stop = true) const;

        /**
         * Logs an ERROR message and throws a caller-supplied exception type.
         * @tparam E exception derived from the class sevnerr
         * @param errstate message to log.
         * @param file_input file_input Null or __FILE__. if __FILE__ is used the message logs the name of the file where the log is called.
         * @param line_input line_input Null or __LINE__. if __LINE__ is used the message logs the row number in the file where the log is called.
         * @param stop if if true throw a err exception (see below).
         * @param err exception derived from the class sevnerr
         */
        template<class E> void error(std::string errstate, const char *file_input = nullptr, int line_input = -1, bool stop = true, E&& err= nullptr) const{
            std::ostringstream oss;
            oss << " LOG::ERROR (Thread " << omp_get_thread_num() << "): " << std::endl;
            oss << " Message : " << errstate << std::endl;
            if (file_input) oss << " From file: " << std::string(file_input) << std::endl;
            if (line_input >= 0) oss << " From line: " << line_input << std::endl;
            std::string err_mess=oss.str();
            if (stop) throw err.istance(err_mess);
            else std::cerr << oss.str();
            // NOTE(review): when stop==true the throw above is taken and this increment is
            // never reached, so count_error only counts non-throwing errors — confirm intended.
#pragma omp atomic
            count_error++;
        }

        /**
         * Logs a message with std::cerr level CRITICAL (no level) on this logger and throw an sevnerr exception.
         * This message will never be filtered out.
         * @param errstate message to log.
         * @param file_input Null or __FILE__. if __FILE__ is used the message logs the name of the file where the log is called.
         * @param line_input Null or __LINE__. if __LINE__ is used the message logs the row number in the file where the log is called.
         */
        void critical(std::string errstate, const char *file_input = nullptr, int line_input = -1) const;

        /**
         * Logs a message with std::cerr level CRITICAL (no level) on this logger and throw an exception E.
         * @tparam E exception derived from the class sevnerr
         * @param errstate message to log.
         * @param file_input file_input Null or __FILE__. if __FILE__ is used the message logs the name of the file where the log is called.
         * @param line_input line_input Null or __LINE__. if __LINE__ is used the message logs the row number in the file where the log is called.
         * @param err exception derived from the class sevnerr
         */
        template<class E> void critical(std::string errstate, const char *file_input = nullptr, int line_input = -1, E&& err= nullptr) const{
            std::ostringstream oss;
            oss << " LOG::CRITICAL (Thread " << omp_get_thread_num() << "): " << std::endl;
            oss << " Message : " << errstate << std::endl;
            if (file_input) oss << " From file: " << std::string(file_input) << std::endl;
            if (line_input >= 0) oss << " From line: " << line_input << std::endl;
            std::string err_mess=oss.str();
            throw err.istance(err_mess);
        }

        ///Variadic prints
        //debug
        // Recursion base case: prints the header and bumps the counter once per call chain.
        void inline pdebug() const {
            std::cout<<"\nLOG::DEBUG (Thread " << omp_get_thread_num() << ")"<< std::endl;
#pragma omp atomic
            count_debug++;}
        template<typename T, typename... Tail> void pdebug(T head, Tail... tail) const{
            if (_LOG_LEVEL::_debug>=log_level) {
                std::cout << head << " ";
                pdebug(tail...);
            }
        }
        //info
        void inline pinfo() const {
            std::cout<<"\nLOG::INFO (Thread " << omp_get_thread_num() << ")"<< std::endl;
#pragma omp atomic
            count_info++;}
        template<typename T, typename... Tail> void pinfo(T head, Tail... tail) const {
            if (_LOG_LEVEL::_info>=log_level) {
                std::cout << head << " ";
                pinfo(tail...);
            }
        }
        //warning
        void inline pwarning() const {
            std::cerr<<"\nLOG::WARNING (Thread " << omp_get_thread_num() << ")"<< std::endl;
#pragma omp atomic
            count_warning++;
        }
        template<typename T, typename... Tail> void pwarning(T head, Tail... tail) const {
            //if (_LOG_LEVEL::_warning>=log_level and count_warning<=MAX_N_WARNING) {
            if (_LOG_LEVEL::_warning>=log_level) {
                std::cerr << head << " ";
                pwarning(tail...);
            }
        }

        ////WARNING C++17 feature
        /*
        *//**
         * Logs a message to std::cout with level DEBUG (lvl 10) on this logger.
         * @tparam Args pack of Variadic arguments
         * @param args args to be printed
         *//*
        template<typename... Args> void pdebug(Args... args){
            if (_LOG_LEVEL::_debug>=log_level){
                std::cout << " LOG::DEBUG (Thread " << omp_get_thread_num() << "): " << std::endl;
                std::cout << " Message:";
                ((std::cout << " "<<args), ...);
#pragma omp atomic
                count_debug++;
            }
        }
        *//**
         * Logs a message to std::cout with level DEBUG (lvl 10) on this logger.
         * @tparam Args pack of Variadic arguments
         * @param args args to be printed
         *//*
        template<typename... Args> void pinfo(Args... args){
            if (_LOG_LEVEL::_info>=log_level){
                std::cout << " LOG::INFO (Thread " << omp_get_thread_num() << "): " << std::endl;
                std::cout << " Message:";
                ((std::cout << " "<<args), ...);
#pragma omp atomic
                count_info++;
            }
        }*/

        /**
         * Get the current level of this logger
         * @return log level.
         */
        inline int get_level() { return log_level;};
        /**
         * Get the current counter of debug calls
         * @return current counter of debug calls
         */
        inline unsigned int get_Ndebug() { return count_debug;};
        /**
         * Get the current counter of info calls
         * @return current counter of info calls
         */
        inline unsigned int get_Ninfo() { return count_info;};
        /**
         * Get the current counter of warning calls
         * @return current counter of warning calls
         */
        inline unsigned int get_Nwarning() { return count_warning;};
        /**
         * Get the current counter of error calls
         * @return current counter of error calls
         */
        inline unsigned int get_Nerror() { return count_error;};
        /**
         * Get the current counter of custom log calls
         * @return current counter of custom log calls
         */
        inline unsigned int get_Ncustom() { return count_custom_log;};

        /**
         * Public interface to change log level
         * @param level string, can be: debug, info, warning, error.
         */
        void set_level(std::string level);

    protected:

        /** An enum storing the various log level. */
        enum _LOG_LEVEL {
            _notset = 0, /**< lvl 0, Notset general value  */
            _debug = 10, /**< lvl 10, Debug level  */
            _info = 20, /**< lvl 20, Info level  */
            _warning = 30, /**< lvl 30, Warning level*/
            _error = 40, /**< lvl 40, Error level  */
            _critical = 100, /**< lvl 100, Only critical level  */
        };

        //const unsigned int MAX_N_WARNING=10;

        /**
         * Set the static log_level.
         * @param level
         */
        inline void set_level(int level) {log_level=level;};

        static int log_level; /*!< Current log level */

        //GI This counter should be thread safe because each update is proteceted by the openmp atomic directive.
        static unsigned int count_debug; /*!< Counter storing how many times a debug log has been called*/
        static unsigned int count_info; /*!< Counter storing how many times a info log has been called*/
        static unsigned int count_warning; /*!< Counter storing how many times a warning log has been called*/
        static unsigned int count_error; /*!< Counter storing how many times an error log has been called*/
        static unsigned int count_custom_log; /*!< Counter storing how many times a custom log has been called*/
        //NB the critical has not a counter since it always throws an exception, so we cannot have more than one call at runtime.
    };

}

#endif //SEVN_SEVNLOG_H
sort.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "sort.h"
#include "timer.h"
#include "io.h"
#include "thd_info.h"


/******************************************************************************
 * DEFINES
 *****************************************************************************/

/* switch to insertion sort past this point -- insertion sort has a lower
 * constant-factor cost than quicksort on tiny ranges */
#define MIN_QUICKSORT_SIZE 8

/* don't bother spawning threads for small sorts */
#define SMALL_SORT_SIZE 1000


/******************************************************************************
 * STATIC FUNCTIONS
 *****************************************************************************/


/**
* @brief Compares ind*[i] and ind*[j] for two-mode tensors.
*
* @param ind0 The primary mode to compare. Defer tie-breaks to ind1.
* @param ind1 The secondary mode to compare.
* @param i The index into ind*.
* @param j The second index into ind*.
*
* @return Returns -1 if ind[i] < ind[j], 1 if ind[i] > ind[j], and 0 if they
*         are equal.
*/
static inline int p_ttcmp2(
  idx_t const * const ind0,
  idx_t const * const ind1,
  idx_t const i,
  idx_t const j)
{
  /* lexicographic order: mode-0 index decides, mode-1 index breaks ties */
  if(ind0[i] < ind0[j]) {
    return -1;
  } else if(ind0[j] < ind0[i]) {
    return 1;
  }
  if(ind1[i] < ind1[j]) {
    return -1;
  } else if(ind1[j] < ind1[i]) {
    return 1;
  }
  return 0;
}


/**
* @brief Compares ind*[i] and j[*] for two-mode tensors.
*
* @param ind0 The primary mode to compare. Defer tie-breaks to ind1.
* @param ind1 The secondary mode to compare.
* @param i The index into ind*[]
* @param j[2] The indices we are comparing i against.
*
* @return Returns -1 if ind[i] < j, 1 if ind[i] > j, and 0 if they are equal.
*/
static inline int p_ttqcmp2(
  idx_t const * const ind0,
  idx_t const * const ind1,
  idx_t const i,
  idx_t const j[2])
{
  /* same lexicographic order as p_ttcmp2, but against a fixed coordinate */
  if(ind0[i] < j[0]) {
    return -1;
  } else if(j[0] < ind0[i]) {
    return 1;
  }
  if(ind1[i] < j[1]) {
    return -1;
  } else if(j[1] < ind1[i]) {
    return 1;
  }
  return 0;
}


/**
* @brief Perform insertion sort on a 2-mode tensor between start and end.
*
* @param tt The tensor to sort.
* @param cmplt Mode permutation used for defining tie-breaking order.
* @param start The first nonzero to sort.
* @param end The last nonzero to sort.
*/
static void p_tt_insertionsort2(
  sptensor_t * const tt,
  idx_t const * const cmplt,
  idx_t const start,
  idx_t const end)
{
  idx_t * const ind0 = tt->ind[cmplt[0]];
  idx_t * const ind1 = tt->ind[cmplt[1]];
  val_t * const vals = tt->vals;

  val_t vbuf;
  idx_t ibuf;

  for(size_t i=start+1; i < end; ++i) {
    size_t j = i;
    /* find insertion point for nonzero i; element i is untouched until the
     * shift below, so comparing against it inside the loop is safe */
    while (j > start && p_ttcmp2(ind0, ind1, i, j-1) < 0) {
      --j;
    }

    vbuf = vals[i];
    /* shift all data */
    memmove(vals+j+1, vals+j, (i-j)*sizeof(val_t));
    vals[j] = vbuf;
    ibuf = ind0[i];
    memmove(ind0+j+1, ind0+j, (i-j)*sizeof(idx_t));
    ind0[j] = ibuf;
    ibuf = ind1[i];
    memmove(ind1+j+1, ind1+j, (i-j)*sizeof(idx_t));
    ind1[j] = ibuf;
  }
}


/**
* @brief Perform quicksort on a 2-mode tensor between start and end.
*
* @param tt The tensor to sort.
* @param cmplt Mode permutation used for defining tie-breaking order.
* @param start The first nonzero to sort.
* @param end The last nonzero to sort.
*/
static void p_tt_quicksort2(
  sptensor_t * const tt,
  idx_t const * const cmplt,
  idx_t const start,
  idx_t const end)
{
  val_t vmid;
  idx_t imid[2];

  idx_t * const ind0 = tt->ind[cmplt[0]];
  idx_t * const ind1 = tt->ind[cmplt[1]];
  val_t * const vals = tt->vals;

  if((end-start) <= MIN_QUICKSORT_SIZE) {
    /* small range: fall back to insertion sort */
    p_tt_insertionsort2(tt, cmplt, start, end);
  } else {
    size_t i = start+1;
    size_t j = end-1;
    size_t k = start + ((end - start) / 2);

    /* grab pivot -- the middle element is stashed in (vmid, imid) and the
     * start slot temporarily holds the displaced middle entry */
    vmid = vals[k];
    vals[k] = vals[start];
    imid[0] = ind0[k];
    imid[1] = ind1[k];
    ind0[k] = ind0[start];
    ind1[k] = ind1[start];

    while(i < j) {
      /* if tt[i] > mid -> tt[i] is on wrong side */
      if(p_ttqcmp2(ind0,ind1,i,imid) == 1) {
        /* if tt[j] <= mid -> swap tt[i] and tt[j] */
        if(p_ttqcmp2(ind0,ind1,j,imid) < 1) {
          val_t vtmp = vals[i];
          vals[i] = vals[j];
          vals[j] = vtmp;
          idx_t itmp = ind0[i];
          ind0[i] = ind0[j];
          ind0[j] = itmp;
          itmp = ind1[i];
          ind1[i] = ind1[j];
          ind1[j] = itmp;
          ++i;
        }
        --j;
      } else {
        /* if tt[j] > mid -> tt[j] is on right side */
        if(p_ttqcmp2(ind0,ind1,j,imid) == 1) {
          --j;
        }
        ++i;
      }
    }

    /* if tt[i] > mid */
    if(p_ttqcmp2(ind0,ind1,i,imid) == 1) {
      --i;
    }
    /* place the pivot into its final position i */
    vals[start] = vals[i];
    vals[i] = vmid;
    ind0[start] = ind0[i];
    ind1[start] = ind1[i];
    ind0[i] = imid[0];
    ind1[i] = imid[1];

    if(i > start + 1) {
      p_tt_quicksort2(tt, cmplt, start, i);
    }
    ++i; /* skip the pivot element */
    if(end - i > 1) {
      p_tt_quicksort2(tt, cmplt, i, end);
    }
  }
}


/**
* @brief Compares ind*[i] and j[*] for three-mode tensors.
*
* @param ind0 The primary mode to compare. Defer tie-breaks to ind1.
* @param ind1 The secondary mode to compare. Defer tie-breaks to ind2.
* @param ind2 The final tie-breaking mode.
* @param i The index into ind*[]
* @param j[3] The indices we are comparing i against.
*
* @return Returns -1 if ind[i] < j, 1 if ind[i] > j, and 0 if they are equal.
*/
static inline int p_ttqcmp3(
  idx_t const * const ind0,
  idx_t const * const ind1,
  idx_t const * const ind2,
  idx_t const i,
  idx_t const j[3])
{
  /* lexicographic order against a fixed 3-coordinate */
  if(ind0[i] < j[0]) {
    return -1;
  } else if(j[0] < ind0[i]) {
    return 1;
  }
  if(ind1[i] < j[1]) {
    return -1;
  } else if(j[1] < ind1[i]) {
    return 1;
  }
  if(ind2[i] < j[2]) {
    return -1;
  } else if(j[2] < ind2[i]) {
    return 1;
  }
  return 0;
}


/**
* @brief Compares ind*[i] and ind*[j] for three-mode tensors.
*
* @param ind0 The primary mode to compare. Defer tie-breaks to ind1.
* @param ind1 The secondary mode to compare. Defer tie-breaks to ind2.
* @param ind2 The final tie-breaking mode.
* @param i The index into ind*.
* @param j The second index into ind*.
*
* @return Returns -1 if ind[i] < ind[j], 1 if ind[i] > ind[j], and 0 if they
*         are equal.
*/
static inline int p_ttcmp3(
  idx_t const * const ind0,
  idx_t const * const ind1,
  idx_t const * const ind2,
  idx_t const i,
  idx_t const j)
{
  if(ind0[i] < ind0[j]) {
    return -1;
  } else if(ind0[j] < ind0[i]) {
    return 1;
  }
  if(ind1[i] < ind1[j]) {
    return -1;
  } else if(ind1[j] < ind1[i]) {
    return 1;
  }
  if(ind2[i] < ind2[j]) {
    return -1;
  } else if(ind2[j] < ind2[i]) {
    return 1;
  }
  return 0;
}


/**
* @brief Compares ind*[i] and ind*[j] for n-mode tensors.
*
* @param tt The tensor we are sorting.
* @param cmplt Mode permutation used for defining tie-breaking order.
* @param i The index into ind*.
* @param j The second index into ind*.
*
* @return Returns -1 if ind[i] < ind[j], 1 if ind[i] > ind[j], and 0 if they
*         are equal.
*/
static inline int p_ttcmp(
  sptensor_t const * const tt,
  idx_t const * const cmplt,
  idx_t const i,
  idx_t const j)
{
  /* compare modes in the order given by cmplt */
  for(idx_t m=0; m < tt->nmodes; ++m) {
    if(tt->ind[cmplt[m]][i] < tt->ind[cmplt[m]][j]) {
      return -1;
    } else if(tt->ind[cmplt[m]][j] < tt->ind[cmplt[m]][i]) {
      return 1;
    }
  }
  return 0;
}


/**
* @brief Compares ind*[i] and j[*] for n-mode tensors.
*
* @param tt The tensor we are sorting.
* @param cmplt Mode permutation used for defining tie-breaking order.
* @param i The index into ind*.
* @param j The coordinate we are comparing against.
*
* @return Returns -1 if ind[i] < j, 1 if ind[i] > j, and 0 if they are equal.
*/
static inline int p_ttqcmp(
  sptensor_t const * const tt,
  idx_t const * const cmplt,
  idx_t const i,
  idx_t const j[MAX_NMODES])
{
  /* j[] is stored in original mode order, so index it through cmplt */
  for(idx_t m=0; m < tt->nmodes; ++m) {
    if(tt->ind[cmplt[m]][i] < j[cmplt[m]]) {
      return -1;
    } else if(j[cmplt[m]] < tt->ind[cmplt[m]][i]) {
      return 1;
    }
  }
  return 0;
}


/**
* @brief Swap nonzeros i and j.
*
* @param tt The tensor to operate on.
* @param i The first nonzero to swap.
* @param j The second nonzero to swap with.
*/
static inline void p_ttswap(
  sptensor_t * const tt,
  idx_t const i,
  idx_t const j)
{
  /* swap the value and every mode's index */
  val_t vtmp = tt->vals[i];
  tt->vals[i] = tt->vals[j];
  tt->vals[j] = vtmp;

  idx_t itmp;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    itmp = tt->ind[m][i];
    tt->ind[m][i] = tt->ind[m][j];
    tt->ind[m][j] = itmp;
  }
}


/**
* @brief Perform insertion sort on a 3-mode tensor between start and end.
*
* @param tt The tensor to sort.
* @param cmplt Mode permutation used for defining tie-breaking order.
* @param start The first nonzero to sort.
* @param end The last nonzero to sort.
*/
static void p_tt_insertionsort3(
  sptensor_t * const tt,
  idx_t const * const cmplt,
  idx_t const start,
  idx_t const end)
{
  idx_t * const ind0 = tt->ind[cmplt[0]];
  idx_t * const ind1 = tt->ind[cmplt[1]];
  idx_t * const ind2 = tt->ind[cmplt[2]];
  val_t * const vals = tt->vals;

  val_t vbuf;
  idx_t ibuf;

  for(size_t i=start+1; i < end; ++i) {
    size_t j = i;
    /* find insertion point for nonzero i */
    while (j > start && p_ttcmp3(ind0, ind1, ind2, i, j-1) < 0) {
      --j;
    }

    vbuf = vals[i];
    /* shift all data */
    memmove(vals+j+1, vals+j, (i-j)*sizeof(val_t));
    vals[j] = vbuf;
    ibuf = ind0[i];
    memmove(ind0+j+1, ind0+j, (i-j)*sizeof(idx_t));
    ind0[j] = ibuf;
    ibuf = ind1[i];
    memmove(ind1+j+1, ind1+j, (i-j)*sizeof(idx_t));
    ind1[j] = ibuf;
    ibuf = ind2[i];
    memmove(ind2+j+1, ind2+j, (i-j)*sizeof(idx_t));
    ind2[j] = ibuf;
  }
}


/**
* @brief Perform insertion sort on an n-mode tensor between start and end.
*
* @param tt The tensor to sort.
* @param cmplt Mode permutation used for defining tie-breaking order.
* @param start The first nonzero to sort.
* @param end The last nonzero to sort.
*/
static void p_tt_insertionsort(
  sptensor_t * const tt,
  idx_t const * const cmplt,
  idx_t const start,
  idx_t const end)
{
  idx_t * ind;
  val_t * const vals = tt->vals;
  idx_t const nmodes = tt->nmodes;

  val_t vbuf;
  idx_t ibuf;

  for(size_t i=start+1; i < end; ++i) {
    size_t j = i;
    /* find insertion point for nonzero i */
    while (j > start && p_ttcmp(tt, cmplt, i, j-1) < 0) {
      --j;
    }

    vbuf = vals[i];
    /* shift all data */
    memmove(vals+j+1, vals+j, (i-j)*sizeof(val_t));
    vals[j] = vbuf;
    /* shift every mode's index array the same way */
    for(idx_t m=0; m < nmodes; ++m) {
      ind = tt->ind[m];
      ibuf = ind[i];
      memmove(ind+j+1, ind+j, (i-j)*sizeof(idx_t));
      ind[j] = ibuf;
    }
  }
}


/**
* @brief Perform quicksort on a 3-mode tensor between start and end.
*
* @param tt The tensor to sort.
* @param cmplt Mode permutation used for defining tie-breaking order.
* @param start The first nonzero to sort.
* @param end The last nonzero to sort.
*/
static void p_tt_quicksort3(
  sptensor_t * const tt,
  idx_t const * const cmplt,
  idx_t const start,
  idx_t const end)
{
  val_t vmid;
  idx_t imid[3];

  idx_t * const ind0 = tt->ind[cmplt[0]];
  idx_t * const ind1 = tt->ind[cmplt[1]];
  idx_t * const ind2 = tt->ind[cmplt[2]];
  val_t * const vals = tt->vals;

  if((end-start) <= MIN_QUICKSORT_SIZE) {
    /* small range: fall back to insertion sort */
    p_tt_insertionsort3(tt, cmplt, start, end);
  } else {
    size_t i = start+1;
    size_t j = end-1;
    size_t k = start + ((end - start) / 2);

    /* grab pivot -- middle element stashed in (vmid, imid) */
    vmid = vals[k];
    vals[k] = vals[start];
    imid[0] = ind0[k];
    imid[1] = ind1[k];
    imid[2] = ind2[k];
    ind0[k] = ind0[start];
    ind1[k] = ind1[start];
    ind2[k] = ind2[start];

    while(i < j) {
      /* if tt[i] > mid -> tt[i] is on wrong side */
      if(p_ttqcmp3(ind0,ind1,ind2,i,imid) == 1) {
        /* if tt[j] <= mid -> swap tt[i] and tt[j] */
        if(p_ttqcmp3(ind0,ind1,ind2,j,imid) < 1) {
          val_t vtmp = vals[i];
          vals[i] = vals[j];
          vals[j] = vtmp;
          idx_t itmp = ind0[i];
          ind0[i] = ind0[j];
          ind0[j] = itmp;
          itmp = ind1[i];
          ind1[i] = ind1[j];
          ind1[j] = itmp;
          itmp = ind2[i];
          ind2[i] = ind2[j];
          ind2[j] = itmp;
          ++i;
        }
        --j;
      } else {
        /* if tt[j] > mid -> tt[j] is on right side */
        if(p_ttqcmp3(ind0,ind1,ind2,j,imid) == 1) {
          --j;
        }
        ++i;
      }
    }

    /* if tt[i] > mid */
    if(p_ttqcmp3(ind0,ind1,ind2,i,imid) == 1) {
      --i;
    }
    /* place the pivot into its final position i */
    vals[start] = vals[i];
    vals[i] = vmid;
    ind0[start] = ind0[i];
    ind1[start] = ind1[i];
    ind2[start] = ind2[i];
    ind0[i] = imid[0];
    ind1[i] = imid[1];
    ind2[i] = imid[2];

    if(i > start + 1) {
      p_tt_quicksort3(tt, cmplt, start, i);
    }
    ++i; /* skip the pivot element */
    if(end - i > 1) {
      p_tt_quicksort3(tt, cmplt, i, end);
    }
  }
}


/**
* @brief Perform quicksort on a n-mode tensor between start and end.
*
* @param tt The tensor to sort.
* @param cmplt Mode permutation used for defining tie-breaking order.
* @param start The first nonzero to sort.
* @param end The last nonzero to sort.
*/
static void p_tt_quicksort(
  sptensor_t * const tt,
  idx_t const * const cmplt,
  idx_t const start,
  idx_t const end)
{
  val_t vmid;
  idx_t imid[MAX_NMODES];

  idx_t * ind;
  val_t * const vals = tt->vals;
  idx_t const nmodes = tt->nmodes;

  if((end-start) <= MIN_QUICKSORT_SIZE) {
    /* small range: fall back to insertion sort */
    p_tt_insertionsort(tt, cmplt, start, end);
  } else {
    size_t i = start+1;
    size_t j = end-1;
    size_t k = start + ((end - start) / 2);

    /* grab pivot -- imid[] is filled in original mode order, matching what
     * p_ttqcmp expects */
    vmid = vals[k];
    vals[k] = vals[start];
    for(idx_t m=0; m < nmodes; ++m) {
      ind = tt->ind[m];
      imid[m] = ind[k];
      ind[k] = ind[start];
    }

    while(i < j) {
      /* if tt[i] > mid -> tt[i] is on wrong side */
      if(p_ttqcmp(tt,cmplt,i,imid) == 1) {
        /* if tt[j] <= mid -> swap tt[i] and tt[j] */
        if(p_ttqcmp(tt,cmplt,j,imid) < 1) {
          p_ttswap(tt,i,j);
          ++i;
        }
        --j;
      } else {
        /* if tt[j] > mid -> tt[j] is on right side */
        if(p_ttqcmp(tt,cmplt,j,imid) == 1) {
          --j;
        }
        ++i;
      }
    }

    /* if tt[i] > mid */
    if(p_ttqcmp(tt,cmplt,i,imid) == 1) {
      --i;
    }
    /* place the pivot into its final position i */
    vals[start] = vals[i];
    vals[i] = vmid;
    for(idx_t m=0; m < nmodes; ++m) {
      ind = tt->ind[m];
      ind[start] = ind[i];
      ind[i] = imid[m];
    }

    if(i > start + 1) {
      p_tt_quicksort(tt, cmplt, start, i);
    }
    ++i; /* skip the pivot element */
    if(end - i > 1) {
      p_tt_quicksort(tt, cmplt, i, end);
    }
  }
}


/**
* @brief Perform a simple serial quicksort.
*
* @param a The array to sort.
* @param n The length of the array.
*/
static void p_quicksort(
  idx_t * const a,
  idx_t const n)
{
  if(n < MIN_QUICKSORT_SIZE) {
    insertion_sort(a, n);
  } else {
    size_t i = 1;
    size_t j = n-1;
    size_t k = n >> 1;

    /* grab the middle element as pivot; slot 0 temporarily holds the
     * displaced middle entry */
    idx_t mid = a[k];
    a[k] = a[0];

    while(i < j) {
      if(a[i] > mid) { /* a[i] is on the wrong side */
        if(a[j] <= mid) {
          /* swap a[i] and a[j] */
          idx_t tmp = a[i];
          a[i] = a[j];
          a[j] = tmp;
          ++i;
        }
        --j;
      } else {
        if(a[j] > mid) { /* a[j] is on the right side */
          --j;
        }
        ++i;
      }
    }

    if(a[i] > mid) {
      --i;
    }
    /* place the pivot into its final position i */
    a[0] = a[i];
    a[i] = mid;

    if(i > 1) {
      p_quicksort(a,i);
    }
    ++i; /* skip the pivot element */
    if(n-i > 1) {
      p_quicksort(a+i, n-i);
    }
  }
}


/**
* @brief Perform a simple serial quicksort with permutation tracking.
*
* @param a The array to sort.
* @param perm The permutation array.
* @param n The length of the array.
*/
static void p_quicksort_perm(
  idx_t * const restrict a,
  idx_t * const restrict perm,
  idx_t const n)
{
  if(n < MIN_QUICKSORT_SIZE) {
    insertion_sort_perm(a, perm, n);
  } else {
    size_t i = 1;
    size_t j = n-1;
    size_t k = n >> 1;

    /* grab the middle element as pivot, tracking its permutation entry */
    idx_t mid = a[k];
    idx_t pmid = perm[k];
    a[k] = a[0];
    perm[k] = perm[0];

    while(i < j) {
      if(a[i] > mid) { /* a[i] is on the wrong side */
        if(a[j] <= mid) {
          /* swap a[i] and a[j] */
          idx_t tmp = a[i];
          a[i] = a[j];
          a[j] = tmp;
          /* swap perm */
          tmp = perm[i];
          perm[i] = perm[j];
          perm[j] = tmp;
          ++i;
        }
        --j;
      } else {
        if(a[j] > mid) { /* a[j] is on the right side */
          --j;
        }
        ++i;
      }
    }

    if(a[i] > mid) {
      --i;
    }
    /* place the pivot into its final position i */
    a[0] = a[i];
    a[i] = mid;
    /* track median too */
    perm[0] = perm[i];
    perm[i] = pmid;

    if(i > 1) {
      p_quicksort_perm(a, perm, i);
    }
    ++i; /* skip the pivot element */
    if(n-i > 1) {
      p_quicksort_perm(a+i, perm+i, n-i);
    }
  }
}


/**
* Map a flat index into the transpose of a dim1 x dim2 layout:
*   idx = idx2*dim1 + idx1
*   -> ret = idx1*dim2 + idx2
*        = (idx%dim1)*dim2 + idx/dim1
*/
static inline idx_t p_transpose_idx(
  idx_t const idx,
  idx_t const dim1,
  idx_t const dim2)
{
  return idx%dim1*dim2 + idx/dim1;
}


/**
* @brief Perform a counting sort on the most significant mode (cmplt[0]) and
*        then parallel quicksorts on each of slices.
*
* @param tt The tensor to sort.
* @param cmplt Mode permutation used for defining tie-breaking order.
*/
static void p_counting_sort_hybrid(
  sptensor_t * const tt,
  idx_t * const cmplt)
{
  idx_t m = cmplt[0];
  idx_t nslices = tt->dims[m];

  /* freshly permuted copies of every mode except m (m's indices are
   * reconstructed from the slice id afterwards) */
  idx_t * new_ind[MAX_NMODES];
  for(idx_t i = 0; i < tt->nmodes; ++i) {
    if(i != m) {
      new_ind[i] = splatt_malloc(tt->nnz * sizeof(**new_ind));
    }
  }
  val_t * new_vals = splatt_malloc(tt->nnz * sizeof(*new_vals));

  /* one histogram row of nslices counters per thread, plus one sentinel */
  idx_t * histogram_array = splatt_malloc(
      (nslices * splatt_omp_get_max_threads() + 1) * sizeof(*histogram_array));

  #pragma omp parallel
  {
    int nthreads = splatt_omp_get_num_threads();
    int tid = splatt_omp_get_thread_num();

    idx_t * histogram = histogram_array + (nslices * tid);
    memset(histogram, 0, nslices * sizeof(idx_t));

    /* static partition of the nonzeros across threads */
    idx_t j_per_thread = (tt->nnz + nthreads - 1)/nthreads;
    idx_t jbegin = SS_MIN(j_per_thread*tid, tt->nnz);
    idx_t jend = SS_MIN(jbegin + j_per_thread, tt->nnz);

    /* count */
    for(idx_t j = jbegin; j < jend; ++j) {
      idx_t idx = tt->ind[m][j];
      ++histogram[idx];
    }

    #pragma omp barrier

    /* prefix sum over the (slice, thread)-transposed layout, so counts for
     * the same slice across threads end up adjacent -- see p_transpose_idx */
    for(idx_t j = (tid*nslices) + 1; j < (tid+1) * nslices; ++j) {
      idx_t transpose_j = p_transpose_idx(j, nthreads, nslices);
      idx_t transpose_j_minus_1 = p_transpose_idx(j - 1, nthreads, nslices);
      histogram_array[transpose_j] += histogram_array[transpose_j_minus_1];
    }

    #pragma omp barrier
    #pragma omp master
    {
      /* carry each thread-partition's total into the next partition */
      for(int t = 1; t < nthreads; ++t) {
        idx_t j0 = (nslices*t) - 1, j1 = nslices * (t+1) - 1;
        idx_t transpose_j0 = p_transpose_idx(j0, nthreads, nslices);
        idx_t transpose_j1 = p_transpose_idx(j1, nthreads, nslices);
        histogram_array[transpose_j1] += histogram_array[transpose_j0];
      }
    }
    #pragma omp barrier

    /* propagate the carried totals into the interior entries */
    if (tid > 0) {
      idx_t transpose_j0 = p_transpose_idx(nslices*tid - 1, nthreads, nslices);
      for(idx_t j = tid*nslices; j < (tid+1) * nslices - 1; ++j) {
        idx_t transpose_j = p_transpose_idx(j, nthreads, nslices);
        histogram_array[transpose_j] += histogram_array[transpose_j0];
      }
    }
    #pragma omp barrier

    /* now copy values into new structures (but not the mode we are sorting) */
    for(idx_t j_off = 0; j_off < (jend-jbegin); ++j_off) {
      /* we are actually going backwards */
      idx_t const j = jend - j_off - 1;
      idx_t idx = tt->ind[m][j];

      --histogram[idx];
      idx_t offset = histogram[idx];

      new_vals[offset] = tt->vals[j];
      for(idx_t mode=0; mode < tt->nmodes; ++mode) {
        if(mode != m) {
          new_ind[mode][offset] = tt->ind[mode][j];
        }
      }
    }
  } /* omp parallel */

  /* install the permuted arrays */
  for(idx_t i = 0; i < tt->nmodes; ++i) {
    if(i != m) {
      splatt_free(tt->ind[i]);
      tt->ind[i] = new_ind[i];
    }
  }
  splatt_free(tt->vals);
  tt->vals = new_vals;

  /* sentinel: end offset of the last slice (histogram_array[i] now holds the
   * start offset of slice i -- NOTE(review): relies on the transposed prefix
   * sum above; confirm against the original SPLATT implementation) */
  histogram_array[nslices] = tt->nnz;

  /* for 3/4D, we can use quicksort on only the leftover modes */
  if(tt->nmodes == 3) {
    #pragma omp parallel for schedule(dynamic)
    for(idx_t i = 0; i < nslices; ++i) {
      p_tt_quicksort2(tt, cmplt+1, histogram_array[i], histogram_array[i + 1]);
      /* mode-m indices within slice i are all i by construction */
      for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) {
        tt->ind[m][j] = i;
      }
    }
  } else if(tt->nmodes == 4) {
    #pragma omp parallel for schedule(dynamic)
    for(idx_t i = 0; i < nslices; ++i) {
      p_tt_quicksort3(tt, cmplt+1, histogram_array[i], histogram_array[i + 1]);
      for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) {
        tt->ind[m][j] = i;
      }
    }
  } else {
    /* shift cmplt left one time, then do normal quicksort */
    idx_t saved = cmplt[0];
    memmove(cmplt, cmplt+1, (tt->nmodes - 1) * sizeof(*cmplt));
    cmplt[tt->nmodes-1] = saved;

    #pragma omp parallel for schedule(dynamic)
    for(idx_t i = 0; i < nslices; ++i) {
      p_tt_quicksort(tt, cmplt, histogram_array[i], histogram_array[i + 1]);
      for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) {
        tt->ind[m][j] = i;
      }
    }

    /* undo cmplt changes */
    saved = cmplt[tt->nmodes-1];
    memmove(cmplt+1, cmplt, (tt->nmodes - 1) * sizeof(*cmplt));
    cmplt[0] = saved;
  }

  splatt_free(histogram_array);
}


/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/* Sort the whole tensor with mode `mode` most significant; dim_perm may
 * override the full mode comparison order (NULL -> cyclic order from mode). */
void tt_sort(
  sptensor_t * const tt,
  idx_t const mode,
  idx_t * dim_perm)
{
  tt_sort_range(tt, mode, dim_perm, 0, tt->nnz);
}


/* Sort nonzeros [start, end) of tt. A full-range sort uses the parallel
 * counting-sort hybrid; sub-ranges fall back to quicksort. */
void tt_sort_range(
  sptensor_t * const tt,
  idx_t const mode,
  idx_t * dim_perm,
  idx_t const start,
  idx_t const end)
{
  idx_t * cmplt;
  if(dim_perm == NULL) {
    /* default comparison order: mode, then the remaining modes cyclically */
    cmplt = (idx_t*) splatt_malloc(tt->nmodes * sizeof(idx_t));
    cmplt[0] = mode;
    for(idx_t m=1; m < tt->nmodes; ++m) {
      cmplt[m] = (mode + m) % tt->nmodes;
    }
  } else {
    cmplt = dim_perm;
  }

  timer_start(&timers[TIMER_SORT]);

  if(start == 0 && end == tt->nnz) {
    p_counting_sort_hybrid(tt, cmplt);

  /* sort a subtensor */
  } else {
    if(tt->nmodes == 3) {
      p_tt_quicksort3(tt, cmplt, start, end);
    } else {
      p_tt_quicksort(tt, cmplt, start, end);
    }
  }

  if(dim_perm == NULL) {
    splatt_free(cmplt);
  }
  timer_stop(&timers[TIMER_SORT]);
}


/* Serial insertion sort of a[0..n). */
void insertion_sort(
  idx_t * const a,
  idx_t const n)
{
  timer_start(&timers[TIMER_SORT]);
  for(size_t i=1; i < n; ++i) {
    idx_t b = a[i];
    size_t j = i;
    while (j > 0 && a[j-1] > b) {
      --j;
    }
    memmove(a+(j+1), a+j, sizeof(*a)*(i-j));
    a[j] = b;
  }
  timer_stop(&timers[TIMER_SORT]);
}


/* Serial quicksort of a[0..n). */
void quicksort(
  idx_t * const a,
  idx_t const n)
{
  timer_start(&timers[TIMER_SORT]);
  p_quicksort(a,n);
  timer_stop(&timers[TIMER_SORT]);
}


/* Serial insertion sort of a[0..n) that applies the same reordering to
 * perm[0..n). */
void insertion_sort_perm(
  idx_t * const restrict a,
  idx_t * const restrict perm,
  idx_t const n)
{
  timer_start(&timers[TIMER_SORT]);

  for(size_t i=1; i < n; ++i) {
    idx_t b = a[i];
    idx_t pb = perm[i];
    size_t j = i;
    while (j > 0 && a[j-1] > b) {
      --j;
    }
    memmove(a+(j+1), a+j, sizeof(*a)*(i-j));
    a[j] = b;
    memmove(perm+(j+1), perm+j, sizeof(*perm)*(i-j));
    perm[j] = pb;
  }

  timer_stop(&timers[TIMER_SORT]);
}


/* Serial quicksort of a[0..n); perm is (re)initialized to the identity and
 * reordered alongside a, yielding the sorting permutation. */
void quicksort_perm(
  idx_t * const restrict a,
  idx_t * const restrict perm,
  idx_t const n)
{
  timer_start(&timers[TIMER_SORT]);

  /* initialize permutation */
  for(idx_t i=0; i < n; ++i) {
    perm[i] = i;
  }

  p_quicksort_perm(a, perm, n);

  timer_stop(&timers[TIMER_SORT]);
}
iochain.c
/*
 * IOchain - Distribute a chain of dependent IO events among threads.
 *
 * This file is part of Bitshuffle
 * Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
 * Website: http://www.github.com/kiyo-masui/bitshuffle
 * Created: 2014
 *
 * See LICENSE file for details about copyright and rights to use.
 *
 */

#include "iochain.h"

#include <stdlib.h>


/* Initialize the chain: create all locks (OpenMP builds only) and seed the
 * first input/output pointers.  Slots other than 0 are published later by
 * ioc_set_next_in/ioc_set_next_out. */
void ioc_init(ioc_chain *C, const void *in_ptr_0, void *out_ptr_0) {
#ifdef _OPENMP
  omp_init_lock(&C->next_lock);
  for (size_t ii = 0; ii < IOC_SIZE; ii ++) {
    omp_init_lock(&(C->in_pl[ii].lock));
    omp_init_lock(&(C->out_pl[ii].lock));
  }
#endif
  C->next = 0;
  C->in_pl[0].ptr = in_ptr_0;
  C->out_pl[0].ptr = out_ptr_0;
}


/* Destroy all locks created by ioc_init (OpenMP builds only). */
void ioc_destroy(ioc_chain *C) {
#ifdef _OPENMP
  omp_destroy_lock(&C->next_lock);
  for (size_t ii = 0; ii < IOC_SIZE; ii ++) {
    omp_destroy_lock(&(C->in_pl[ii].lock));
    omp_destroy_lock(&(C->out_pl[ii].lock));
  }
#endif
}


/* Claim the next iteration and return its input pointer.
 *
 * Under next_lock: takes a sequence number, then acquires this iteration's
 * input-slot lock plus the *next* iteration's input and output slot locks.
 * Those two "next" locks are released only by ioc_set_next_in and
 * ioc_set_next_out, which is what serializes successive iterations until
 * their pointers have been published. */
const void *ioc_get_in(ioc_chain *C, size_t *this_iter) {
#ifdef _OPENMP
  omp_set_lock(&C->next_lock);
  #pragma omp flush
#endif
  *this_iter = C->next;
  C->next ++;
#ifdef _OPENMP
  omp_set_lock(&(C->in_pl[*this_iter % IOC_SIZE].lock));
  omp_set_lock(&(C->in_pl[(*this_iter + 1) % IOC_SIZE].lock));
  omp_set_lock(&(C->out_pl[(*this_iter + 1) % IOC_SIZE].lock));
  omp_unset_lock(&C->next_lock);
#endif
  return C->in_pl[*this_iter % IOC_SIZE].ptr;
}


/* Publish the input pointer for the next iteration and release the lock that
 * ioc_get_in took on that slot, allowing the next iteration to proceed. */
void ioc_set_next_in(ioc_chain *C, size_t* this_iter, void* in_ptr) {
  C->in_pl[(*this_iter + 1) % IOC_SIZE].ptr = in_ptr;
#ifdef _OPENMP
  omp_unset_lock(&(C->in_pl[(*this_iter + 1) % IOC_SIZE].lock));
#endif
}


/* Fetch this iteration's output pointer.  Blocks on the slot lock until the
 * previous iteration has published it via ioc_set_next_out. */
void *ioc_get_out(ioc_chain *C, size_t *this_iter) {
#ifdef _OPENMP
  omp_set_lock(&(C->out_pl[(*this_iter) % IOC_SIZE].lock));
  #pragma omp flush
#endif
  void *out_ptr = C->out_pl[*this_iter % IOC_SIZE].ptr;
#ifdef _OPENMP
  omp_unset_lock(&(C->out_pl[(*this_iter) % IOC_SIZE].lock));
#endif
  return out_ptr;
}


/* Publish the output pointer for the next iteration, then release both the
 * next output-slot lock and this iteration's input-slot lock (the latter
 * frees the slot for reuse IOC_SIZE iterations later). */
void ioc_set_next_out(ioc_chain *C, size_t *this_iter, void* out_ptr) {
  C->out_pl[(*this_iter + 1) % IOC_SIZE].ptr = out_ptr;
#ifdef _OPENMP
  omp_unset_lock(&(C->out_pl[(*this_iter + 1) % IOC_SIZE].lock));
  omp_unset_lock(&(C->in_pl[(*this_iter) % IOC_SIZE].lock));
#endif
}
trsm_x_bsr_u_hi_row.c
#include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"

/*
 * Triangular solve with multiple right-hand sides (trsm) for a BSR matrix:
 * upper-triangular, unit diagonal (per the _u_hi naming -- the diagonal
 * entries are never divided by; TODO confirm against the dispatch table),
 * row-major dense operands.  Solves A * y = alpha * x column by column via
 * backward substitution over block rows.
 *
 * alpha   scalar applied to x
 * A       BSR sparse matrix (row-major blocks required)
 * x       right-hand side, ldx leading dimension
 * columns number of right-hand-side columns
 * y       solution, ldy leading dimension (written in place)
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_BSR *A,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    const ALPHA_INT num_thread = alpha_get_thread_num();
    const ALPHA_INT bs = A->block_size;
    const ALPHA_INT m = A->rows * bs;
    const ALPHA_INT n = A->cols * bs;
    const ALPHA_INT bs2 = bs * bs;
    const ALPHA_INT b_rows = m / bs;
    const ALPHA_INT b_cols = n / bs;
    const alphasparse_layout_t block_layout = A->block_layout;
    /* this kernel only handles row-major blocks */
    if(block_layout != ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
    {
        printf("layout not consistent!!!\n");
        exit(-1);
    }

    /* right-hand-side columns are independent -> parallelize over them */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        /* per-column accumulator for one block row */
        ALPHA_Number* temp = (ALPHA_Number*) alpha_malloc(bs*sizeof(ALPHA_Number));
        /* backward substitution: process block rows bottom-up */
        for (ALPHA_INT br = b_rows - 1; br >= 0; br--)
        {
            for(ALPHA_INT i = 0 ; i < bs ; i++){
                alpha_setzero(temp[i]);
            }
            ALPHA_INT diagBlock = -1;
            for (ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++)
            {
                ALPHA_INT bc = A->col_indx[ai];
                if(bc > br)
                    //row-major
                    for(ALPHA_INT row = 0; row < bs; row++)
                    {
                        //all entities belongs to upper triangle
                        ALPHA_INT a0_offset = ai * bs2 + row * bs;
                        for(ALPHA_INT col = 0 ; col < bs ; col++)
                        {
                            /* accumulate A[br,bc] * y[bc] using already-solved
                             * rows below (bc > br) */
                            ALPHA_INT y_offset = (bc * bs + col) * ldy + out_y_col;
                            ALPHA_INT ele_offset = a0_offset + col;
                            alpha_madde(temp[row], A->values[ ele_offset ] ,y[y_offset]);
                        }
                    }
                //diagonal must be none-zero block
                if( bc==br ){
                    diagBlock = ai;
                }
            }
            if(diagBlock == -1)
            {
                printf("lhs matrix invalid for trsm!!!\n");
                exit(-1);
            }
            //row-major
            //right-bottom most: solve within the diagonal block, bottom row up
            for(ALPHA_INT row = bs - 1; row >=0 ; row--)
            {
                //upper triangle of block: fold in already-solved rows of this block
                for(ALPHA_INT col = row + 1 ; col < bs ; col++){
                    ALPHA_INT y_offset = (br * bs + col) * ldy + out_y_col;
                    alpha_madde(temp[row] ,A->values[ diagBlock * bs2 + row * bs + col] ,y[y_offset]);
                }
                ALPHA_Number t;
                alpha_setzero(t);
                /* y = alpha*x - accumulated off-diagonal contributions
                 * (no division: unit diagonal assumed) */
                alpha_mul(t,alpha,x[(br * bs + row) * ldx + out_y_col] );
                alpha_sub(y[(br * bs + row) * ldy + out_y_col],t,temp[row]);
            }
        }
        alpha_free(temp);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
omp_ex_19.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
MIT License

Copyright (c) 2019 NOUREDDINE DAGHBOUDJ

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

#define N 1024

/* Fill array[0..size) with pseudo-random values in [10, 49]. */
void initArray(unsigned int *array, unsigned int size)
{
    for(unsigned int i=0; i<size; i++)
        array[i] = rand() % 40 + 10;
}

/* Print the first `size` elements of array on one line. */
void printArray(unsigned int *array, unsigned int size)
{
    for(unsigned int i=0; i<size; i++)
        printf("%u ", array[i]); /* %u: elements are unsigned int (was %i) */
    printf("\n");
}

/* C[i] = A[i] + B[i] for i in [0, size); the iterations are independent, so
 * the loop is parallelized with an OpenMP worksharing construct. */
void addArrays(unsigned int *A, unsigned int *B, unsigned int *C, unsigned int size)
{
    #pragma omp parallel
    {
        #pragma omp for
        for(unsigned int i=0; i<size; i++)
        {
            C[i] = A[i] + B[i];
        }
    }
}

/* Demo driver: build two random arrays, add them in parallel, and print the
 * first 16 elements of each.  srand(0) keeps the output reproducible. */
int main()
{
    unsigned int a[N], b[N], c[N];

    srand(0);
    initArray(a, N);
    initArray(b, N);

    addArrays(a, b, c, N);

    printf("C = A + B\n");
    printf("A = ");
    printArray(a, 16);
    printf("B = ");
    printArray(b, 16);
    printf("C = ");
    printArray(c, 16);

    return 0;
}
nested_thread_num.c
// RUN: %libomp-compile-and-run | FileCheck %s // RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s // REQUIRES: ompt // UNSUPPORTE: gcc-4, gcc-5, gcc-6, gcc-7 #define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN #include "callback.h" #include <omp.h> #include <unistd.h> int main() { int condition = 0; omp_set_nested(1); print_frame(0); #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_frame(0); // get all implicit task events before starting nested: #pragma omp barrier #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_ids(2); print_frame(0); OMPT_SIGNAL(condition); OMPT_WAIT(condition, 4); #pragma omp barrier print_fuzzy_address(1); print_ids(0); } print_fuzzy_address(2); print_ids(0); } print_fuzzy_address(3); return 0; } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: parallel_data initially not null // CHECK-NOT: 0: task_data initially not null // CHECK-NOT: 0: thread_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: // CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], // CHECK-SAME: parent_task_frame.exit=[[NULL]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], // CHECK-SAME: requested_team_size=2, // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: invoker=[[PARALLEL_INVOKER:[0-9]+]] // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: // Note that we cannot ensure that the worker threads have already called // barrier_end and implicit_task_end before parallel_end! 
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // CHECK: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // THREADS: {{^}}0: NULL_POINTER=[[NULL:.*$]] // THREADS: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: // THREADS-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], // THREADS-SAME: parent_task_frame.exit=[[NULL]], // THREADS-SAME: parent_task_frame.reenter=[[MAIN_REENTER]], // THREADS-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // THREADS-SAME: invoker=[[PARALLEL_INVOKER:[0-9]+]] // nested parallel masters // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]], // THREADS-SAME: team_size=2, thread_num=0 // THREADS: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // THREADS-SAME: reenter_frame=[[NULL]], // THREADS-SAME: thread_num=0 // THREADS: {{^}}[[MASTER_ID]]: task level 1: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], // THREADS-SAME: reenter_frame=[[MAIN_REENTER]] // THREADS: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: // THREADS-SAME: parent_task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: parent_task_frame.exit=[[EXIT]], // THREADS-SAME: parent_task_frame.reenter=[[REENTER]], // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], // THREADS-SAME: requested_team_size=2, // 
THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // THREADS-SAME: invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]], team_size=2, // THREADS-SAME: thread_num=0 // THREADS: __builtin_frame_address({{.}})=[[NESTED_EXIT:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: exit_frame=[[NESTED_EXIT]], reenter_frame=[[NULL]], // THREADS-SAME: thread_num=0 // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // THREADS-SAME: reenter_frame=[[REENTER]] // THREADS: {{^}}[[MASTER_ID]]: task level 2: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], // THREADS-SAME: reenter_frame=[[MAIN_REENTER]] // THREADS: __builtin_frame_address(0)=[[NESTED_REENTER:0x[0-f]+]] // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // explicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[BARRIER_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: exit_frame=[[NESTED_EXIT]], reenter_frame=[[NESTED_REENTER]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[BARRIER_RETURN_ADDRESS]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: // THREADS-SAME: 
parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: exit_frame=[[NESTED_EXIT]], reenter_frame=[[NULL]] // implicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: invoker=[[PARALLEL_INVOKER]], // THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]] // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // THREADS-SAME: reenter_frame=[[NULL]] // implicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], // THREADS-SAME: reenter_frame=[[NULL]] // THREADS: {{^}}[[MASTER_ID]]: 
ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], // THREADS-SAME: invoker=[[PARALLEL_INVOKER]], // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // Worker of first nesting level // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]], team_size=2, // THREADS-SAME: thread_num=[[OUTER_THREADNUM:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: thread_num=[[OUTER_THREADNUM]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: // THREADS-SAME: parent_task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: parent_task_frame.exit={{0x[0-f]+}}, // THREADS-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=2, // THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, // THREADS-SAME: invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]], team_size=2, // THREADS-SAME: thread_num=[[INNER_THREADNUM:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: 
task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: thread_num=[[INNER_THREADNUM]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: thread_num=[[OUTER_THREADNUM]] // THREADS: {{^}}[[THREAD_ID]]: task level 2: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // nested parallel worker threads // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS-SAME: thread_num=[[THREADNUM:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]] // THREADS-SAME: thread_num=[[THREADNUM]] // 
can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, // THREADS-SAME: task_id={{[0-9]+}} // THREADS-SAME: thread_num={{[01]}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]] // THREADS-SAME: thread_num=0 // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // other nested parallel worker threads // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS-SAME: thread_num=[[THREADNUM:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]] // THREADS-SAME: thread_num=[[THREADNUM]] // can't reliably tell which parallel region is the parent... 
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, // THREADS-SAME: task_id={{[0-9]+}} // THREADS-SAME: thread_num={{[01]}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]] // THREADS-SAME: thread_num=0 // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 16; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
layerramdistancetransform.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2017 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *********************************************************************************/ #ifndef IVW_LAYERRAMDISTANCETRANSFORM_H #define IVW_LAYERRAMDISTANCETRANSFORM_H #include <modules/base/basemoduledefine.h> #include <inviwo/core/common/inviwo.h> #include <inviwo/core/util/indexmapper.h> #include <inviwo/core/datastructures/image/layer.h> #include <inviwo/core/datastructures/image/layerram.h> #include <inviwo/core/datastructures/image/layerramprecision.h> #ifndef __clang__ #include <omp.h> #endif namespace inviwo { namespace util { /** * Implementation of Euclidean Distance Transform according to Saito's algorithm: * T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations * of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11). * pp. 1551-1565, 1994. * http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf * * Calculates the distance in base mat space * * Predicate is a function of type (const T &value) -> bool to deside if a value in the input * is a "feature". * * ValueTransform is a function of type (const U& squaredDist) -> U that is appiled to all * squared distance values at the end of the calculation. * * ProcessCallback is a function of type (double progress) -> void that is called with a value * from 0 to 1 to indicate the progress of the calculation. 
*/
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
                               LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis,
                               const size2_t upsample, Predicate predicate,
                               ValueTransform valueTransform, ProgressCallback callback);

// Convenience overload: features are values whose normalized form is > 0.5,
// output is the (non-squared) Euclidean distance.
// NOTE(review): the parameter is named `inVolume` here but `inLayer` in the
// definition below — legal C++, but the names should probably be unified.
template <typename T, typename U>
void layerRAMDistanceTransform(const LayerRAMPrecision<T> *inVolume,
                               LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis,
                               const size2_t upsample);

// Dispatching front-ends: pick the typed LayerRAM representation and forward.
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                            const size2_t upsample, Predicate predicate,
                            ValueTransform valueTransform, ProgressCallback callback);

template <typename U, typename ProgressCallback>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                            const size2_t upsample, double threshold, bool normalize, bool flip,
                            bool square, double scale, ProgressCallback callback);

template <typename U>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                            const size2_t upsample, double threshold, bool normalize, bool flip,
                            bool square, double scale);

}  // namespace

// Two-pass (x, then y) squared-distance computation per Saito's algorithm,
// followed by a value-transform pass. Distances are accumulated as squared
// distances scaled by the per-axis squared voxel size.
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
                                     LayerRAMPrecision<U> *outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample,
                                     Predicate predicate, ValueTransform valueTransform,
                                     ProgressCallback callback) {
#ifndef __clang__
    // Use one OpenMP thread per hardware thread (clang builds skip omp.h above).
    omp_set_num_threads(std::thread::hardware_concurrency());
#endif

    using int64 = glm::int64;
    using i64vec2 = glm::tvec2<int64>;

    auto square = [](auto a) { return a * a; };

    callback(0.0);  // report start of computation

    const T *src = inLayer->getDataTyped();
    U *dst = outDistanceField->getDataTyped();

    const i64vec2 srcDim{ inLayer->getDimensions() };
    const i64vec2 dstDim{ outDistanceField->getDimensions() };
    const i64vec2 sm{ upsample };

    // Squared basis: squared physical extent along each axis sits on the
    // diagonal; off-diagonal entries are non-zero for non-orthogonal bases.
    const auto squareBasis = glm::transpose(basis) * basis;
    const Vector<2, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1]};
    // Squared physical size of one destination voxel along each axis.
    const Vector<2, U> squareVoxelSize{squareBasisDiag / Vector<2, U>{dstDim * dstDim}};
    const Vector<2, U> invSquareVoxelSize{Vector<2, U>{1.0f} / squareVoxelSize};

    {
        // Warn (but proceed) if the basis is not orthogonal: the axis-separable
        // algorithm below only yields correct distances for orthogonal bases.
        const auto maxdist = glm::compMax(squareBasisDiag);
        bool orthogonal = true;
        for (size_t i = 0; i < squareBasis.length(); i++) {
            for (size_t j = 0; j < squareBasis.length(); j++) {
                if (i != j) {
                    if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
                        orthogonal = false;
                        break;
                    }
                }
            }
        }
        if (!orthogonal) {
            LogWarnCustom(
                "layerRAMDistanceTransform",
                "Calculating the distance transform on a non-orthogonal layer will not give "
                "correct values");
        }
    }

    // The destination must be exactly the source upsampled by `sm` per axis.
    if (srcDim * sm != dstDim) {
        throw Exception("DistanceTransformRAM: Dimensions does not match src = " +
                            toString(srcDim) + " dst = " + toString(dstDim) +
                            " scaling = " + toString(sm),
                        IvwContextCustom("layerRAMDistanceTransform"));
    }

    util::IndexMapper<2, int64> srcInd(srcDim);
    util::IndexMapper<2, int64> dstInd(dstDim);

    // A destination pixel is a feature iff its (downsampled) source pixel
    // satisfies the predicate.
    auto is_feature = [&](const int64 x, const int64 y) {
        return predicate(src[srcInd(x / sm.x, y / sm.y)]);
    };

    // first pass, forward and backward scan along x
    // result: min distance in x direction
#pragma omp parallel for
    for (int64 y = 0; y < dstDim.y; ++y) {
        // forward
        U dist = static_cast<U>(dstDim.x);  // "infinity": farther than any pixel in the row
        for (int64 x = 0; x < dstDim.x; ++x) {
            if (!is_feature(x, y)) {
                ++dist;
            } else {
                dist = U(0);
            }
            dst[dstInd(x, y)] = squareVoxelSize.x * square(dist);
        }
        // backward
        dist = static_cast<U>(dstDim.x);
        for (int64 x = dstDim.x - 1; x >= 0; --x) {
            if (!is_feature(x, y)) {
                ++dist;
            } else {
                dist = U(0);
            }
            dst[dstInd(x, y)] = std::min<U>(dst[dstInd(x, y)], squareVoxelSize.x * square(dist));
        }
    }

    // second pass, scan y direction
    // for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
    // result: min distance in x and y direction
    callback(0.45);

#pragma omp parallel
    {
        // Per-thread column buffer so the minimization reads pass-1 values
        // while dst is being overwritten.
        std::vector<U> buff;
        buff.resize(dstDim.y);

#pragma omp for
        for (int64 x = 0; x < dstDim.x; ++x) {
            // cache column data into temporary buffer
            for (int64 y = 0; y < dstDim.y; ++y) {
                buff[y] = dst[dstInd(x, y)];
            }
            for (int64 y = 0; y < dstDim.y; ++y) {
                auto d = buff[y];
                if (d != U(0)) {
                    // Only offsets with |n| <= rMax can beat the current d.
                    const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
                    // NOTE(review): rStart = min(rMax, y - 1) means buff[0] is
                    // never visited for y >= 1; min(rMax, y) would include
                    // row 0 — verify against the reference implementation.
                    const auto rStart = std::min(rMax, y - 1);
                    const auto rEnd = std::min(rMax, dstDim.y - y);
                    for (int64 n = -rStart; n < rEnd; ++n) {
                        const auto w = buff[y + n] + squareVoxelSize.y * square(n);
                        if (w < d) d = w;
                    }
                }
                dst[dstInd(x, y)] = d;
            }
        }
    }

    // scale data
    callback(0.9);
    const int64 layerSize = dstDim.x * dstDim.y;
#pragma omp parallel for
    for (int64 i = 0; i < layerSize; ++i) {
        dst[i] = valueTransform(dst[i]);  // e.g. sqrt and/or scaling of the squared distance
    }

    callback(1.0);  // done
}

// Default predicate/transform overload: feature = normalized value > 0.5,
// output = Euclidean (square-rooted) distance, progress ignored.
template <typename T, typename U>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
                                     LayerRAMPrecision<U> *outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample) {
    util::layerRAMDistanceTransform(
        inLayer, outDistanceField, basis, upsample,
        [](const T &val) { return util::glm_convert_normalized<double>(val) > 0.5; },
        [](const U &squareDist) {
            return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
        },
        [](double f) {});
}

// Dispatch on the layer's scalar data type and forward to the RAM implementation.
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                                  const size2_t upsample, Predicate predicate,
                                  ValueTransform valueTransform, ProgressCallback callback) {
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample,
                                  predicate, valueTransform, callback);
    });
}

// Flag-driven front-end. The three flags select one of 8 predicate/transform
// combinations:
//   normalize — compare the glm-normalized value against `threshold`
//   flip      — features are values BELOW the threshold instead of above
//   square    — keep the squared distance (skip the sqrt)
// `scale` multiplies the final (squared or plain) distance.
template <typename U, typename ProgressCallback>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale,
                                  ProgressCallback progress) {
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        using ValueType = util::PrecsionValueType<decltype(lrprecision)>;

        // raw-value predicates
        const auto predicateIn = [threshold](const ValueType &val) { return val < threshold; };
        const auto predicateOut = [threshold](const ValueType &val) { return val > threshold; };

        // normalized-value predicates
        const auto normPredicateIn = [threshold](const ValueType &val) {
            return util::glm_convert_normalized<double>(val) < threshold;
        };
        const auto normPredicateOut = [threshold](const ValueType &val) {
            return util::glm_convert_normalized<double>(val) > threshold;
        };

        // value transforms: scaled squared distance, or scaled Euclidean distance
        const auto valTransIdent = [scale](const float &squareDist) {
            return static_cast<float>(scale * squareDist);
        };
        const auto valTransSqrt = [scale](const float &squareDist) {
            return static_cast<float>(scale * std::sqrt(squareDist));
        };

        if (normalize && square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateIn, valTransIdent, progress);
        } else if (normalize && square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateOut, valTransIdent, progress);
        } else if (normalize && !square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateIn, valTransSqrt, progress);
        } else if (normalize && !square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateOut, valTransSqrt, progress);
        } else if (!normalize && square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateIn, valTransIdent, progress);
        } else if (!normalize && square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateOut, valTransIdent, progress);
        } else if (!normalize && !square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateIn, valTransSqrt, progress);
        } else if (!normalize && !square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateOut, valTransSqrt, progress);
        }
    });
}

// Overload without a progress callback.
template <typename U>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale) {
    util::layerDistanceTransform(inLayer, outDistanceField, upsample, threshold, normalize, flip,
                                 square, scale, [](double d) {});
}

}  // namespace

#endif  // IVW_LAYERRAMDISTANCETRANSFORM_H
images_mormalize.h
#ifndef _IMAGES_NORMALIZE_H
#define _IMAGES_NORMALIZE_H

namespace cpp_torch {
namespace test {

// Normalize one image to mean 0 and variance 1 (z-score).
/*
 * @param image  [in/out] image data, normalized in place
 * @param mean   [out] mean of the input data
 * @param stddiv [out] standard deviation of the input data (sqrt of variance)
 */
void images_normalize(tiny_dnn::vec_t& image, float& mean, float& stddiv)
{
	mean = 0.0;
	stddiv = 0.0;
	for (int k = 0; k < image.size(); k++)
	{
		mean += image[k];
	}
	mean /= image.size();
	for (int k = 0; k < image.size(); k++)
	{
		stddiv += (image[k] - mean)*(image[k] - mean);
	}
	stddiv = sqrt(stddiv / image.size());
	//printf("mean:%f stddiv:%f\n", mean, stddiv);

	// 1.0e-12 guards against division by zero for constant images
#pragma omp parallel for
	for (int k = 0; k < image.size(); k++)
	{
		image[k] = (image[k] - mean) / (stddiv + 1.0e-12);
	}
}

// Normalize each image independently to mean 0 and variance 1.
/*
 * @param images [in/out] image data vector, each element normalized in place
 */
void images_normalize_(std::vector<tiny_dnn::vec_t>& images)
{
#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		// per-image statistics are discarded
		float mean = 0.0;
		float stddiv = 0.0;
		images_normalize(images[i], mean, stddiv);
	}
}

// Normalize ALL images jointly to mean 0 and variance 1, using statistics
// pooled over the whole data set.
// NOTE(review): the element count uses images[0].size(), i.e. it assumes a
// non-empty vector of equally-sized images — confirm with callers.
/*
 * @param images [in/out] image data vector, normalized in place
 * @param mean   [out] mean over all images
 * @param stddiv [out] standard deviation over all images
 */
void images_normalize(std::vector<tiny_dnn::vec_t>& images, float& mean, float& stddiv)
{
	mean = 0.0;
	stddiv = 0.0;
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			mean += images[i][k];
		}
	}
	mean /= images.size()*images[0].size();

	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			stddiv += (images[i][k] - mean)*(images[i][k] - mean);
		}
	}
	stddiv = sqrt(stddiv / (images.size()*images[0].size()));
	//printf("mean:%f stddiv:%f\n", mean, stddiv);

#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			images[i][k] = (images[i][k] - mean) / (stddiv + 1.0e-12);
		}
	}
}

// Normalize all images with externally supplied statistics (e.g. training-set
// mean/std applied to a test set).
/*
 * @param images [in/out] image data vector, normalized in place
 * @param mean   [in] mean to subtract
 * @param stddiv [in] standard deviation to divide by
 */
void images_normalize_(std::vector<tiny_dnn::vec_t>& images, const float mean, const float stddiv)
{
	//printf("mean:%f stddiv:%f\n", mean, stddiv);

#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			images[i][k] = (images[i][k] - mean) / (stddiv + 1.0e-12);
		}
	}
}

// Map pixel values to [-1, 1] assuming an input range of [0, 256)
// (value/128 - 1).
/*
 * @param images [in/out] image data vector, rescaled in place
 */
void images_normalize_11(std::vector<tiny_dnn::vec_t>& images)
{
#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			images[i][k] = images[i][k]/ 128.0 - 1.0;
		}
	}
}

#if 10
// Map pixel values to approximately [-1, 1]. The range endpoints are the
// AVERAGES of the per-image minima/maxima (not the global extrema), so
// individual pixels may fall slightly outside [-1, 1].
/*
 * @param images [in/out] image data vector, rescaled in place
 * @param max    [out] average of per-image maxima used as the upper bound
 * @param min    [out] average of per-image minima used as the lower bound
 */
void images_normalize_11(std::vector<tiny_dnn::vec_t>& images, float& max, float& min)
{
	// per-image extrema
	std::vector<float_t> max_(images.size());
	std::vector<float_t> min_(images.size());
	for (int i = 0; i < images.size(); i++)
	{
		max_[i] = -999999.0;
		min_[i] = 999999.0;
		for (int k = 0; k < images[i].size(); k++)
		{
			if (min_[i] > images[i][k]) min_[i] = images[i][k];
			if (max_[i] < images[i][k]) max_[i] = images[i][k];
		}
	}
	// average the per-image extrema
	max = 0, min = 0;
	for (int i = 0; i < images.size(); i++)
	{
		max += max_[i];
		min += min_[i];
	}
	max /= images.size();
	min /= images.size();

#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			images[i][k] = 2.0*(images[i][k] - min) / (max - min) - 1.0;
		}
	}
}
#else
// Alternative (disabled): map to [-1, 1] using the GLOBAL min/max over all
// images.
/*
 * @param images [in/out] image data vector, rescaled in place
 * @param max_   [out] global maximum over all images
 * @param min_   [out] global minimum over all images
 */
void images_normalize_11(std::vector<tiny_dnn::vec_t>& images, float& max_, float& min_)
{
	max_ = -9999999.0;
	min_ = 9999999.0;
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			if (min_ > images[i][k]) min_ = images[i][k];
			if (max_ < images[i][k]) max_ = images[i][k];
		}
	}

#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			images[i][k] = 2.0*(images[i][k] - min_)/ (max_ - min_) - 1.0;
		}
	}
}
#endif

// Map to [-1, 1] using externally supplied range bounds.
/*
 * @param images [in/out] image data vector, rescaled in place
 * @param max_   [in] upper bound of the source range
 * @param min_   [in] lower bound of the source range
 */
void images_normalize_11_(std::vector<tiny_dnn::vec_t>& images, const float max_, const float min_)
{
#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			images[i][k] = 2.0*(images[i][k] - min_) / (max_ - min_) - 1.0;
		}
	}
}

// Map pixel values to [0, 1] using the global min/max over all images.
/*
 * @param images [in/out] image data vector, rescaled in place
 * @param max_   [out] global maximum over all images
 * @param min_   [out] global minimum over all images
 */
void images_normalize_01(std::vector<tiny_dnn::vec_t>& images, float& max_, float& min_)
{
	max_ = -9999999.0;
	min_ = 9999999.0;
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			if (min_ > images[i][k]) min_ = images[i][k];
			if (max_ < images[i][k]) max_ = images[i][k];
		}
	}

#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			images[i][k] = (images[i][k] - min_) / (max_ - min_);
		}
	}
}

// Map to [0, 1] using externally supplied range bounds.
/*
 * @param images [in/out] image data vector, rescaled in place
 * @param max_   [in] upper bound of the source range
 * @param min_   [in] lower bound of the source range
 */
void images_normalize_01_(std::vector<tiny_dnn::vec_t>& images, const float max_, const float min_)
{
#pragma omp parallel for
	for (int i = 0; i < images.size(); i++)
	{
		for (int k = 0; k < images[i].size(); k++)
		{
			images[i][k] = (images[i][k] - min_) / (max_ - min_);
		}
	}
}

}  // namespace test
}  // namespace cpp_torch
#endif
omp_num_teams_SPMD.c
#include <stdio.h>
#include <omp.h>

/*
 * SPMD-style correctness test for OpenMP target offload:
 * launches N teams (with a thread limit of 1024 per team) that cooperatively
 * write Res[j] = j for all NN = N*N elements via `distribute parallel for`,
 * then verifies the result on the host.
 * Returns 0 on success, 1 on the first mismatch found.
 */
int main()
{
	int N = 1024;
	int NN = N*N;
	int Res[NN]; /* VLA (~4 MB with 4-byte int) on the host stack; mapped to/from the device for the target region */
	for (int i=0; i < NN; i++)
		Res[i] = -1; /* sentinel value so any slot the device fails to write is detected below */

	/* Request N teams; the distribute parallel for spreads the NN iterations
	   across teams and across the threads within each team. */
	#pragma omp target teams thread_limit(1024) num_teams(N)
	#pragma omp distribute parallel for
	for (int j=0; j < NN; j++) {
		if (j==12) /* print once, from an arbitrary iteration, to report the actual launch geometry */
			printf("teams %d threads %d\n",omp_get_num_teams(), omp_get_num_threads());
		Res[j] = j;
	}

	/* Host-side verification of every element */
	for (int i=0; i < NN; i++)
		if (Res[i] != i) {
			printf("Failed %d %d\n",i, Res[i]);
			return 1;
		}
	return 0;
}
re_model_template.h
/*!
 * This file is part of GPBoost a C++ library for combining
 * boosting with Gaussian process and mixed effects models
 *
 * Copyright (c) 2020 Fabio Sigrist. All rights reserved.
 *
 * Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
 */
#ifndef GPB_RE_MODEL_TEMPLATE_H_
#define GPB_RE_MODEL_TEMPLATE_H_

#define _USE_MATH_DEFINES // for M_PI
#include <cmath>
#include <GPBoost/type_defs.h>
#include <GPBoost/re_comp.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <GPBoost/Vecchia_utils.h>
#include <GPBoost/GP_utils.h>
#include <GPBoost/likelihoods.h>
//#include <Eigen/src/misc/lapack.h>
#define OPTIM_ENABLE_EIGEN_WRAPPERS
#include "optim.hpp"
#include <memory>
#include <mutex>
#include <vector>
#include <algorithm> // std::shuffle
#include <random> // std::default_random_engine
//#include <typeinfo> // Only needed for debugging
#include <chrono> // only for debugging
#include <thread> // only for debugging
//std::this_thread::sleep_for(std::chrono::milliseconds(200));// Only for debugging
//std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();// Only for debugging
//std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();// Only for debugging
//double el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count()) / 1000000.;// Only for debugging
//Log::REInfo("Time for : %g", el_time);// Only for debugging
//for (int j = 0; j < 3; ++j) {// Only for debugging
//	Log::REInfo("CalcPsiInv sparse: psi_inv[%d,0:2]: %g, %g, %g", j, psi_inv.coeffRef(j, 0), psi_inv.coeffRef(j, 1), psi_inv.coeffRef(j, 2));
//}

#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795029
#endif

#include <LightGBM/utils/log.h>
using LightGBM::Log;

namespace GPBoost {

	// Forward declaration
	template<typename T_mat, typename T_chol>
	class REModelTemplate;

	// Auxiliary class for passing data to EvalLLforOptimLib for OptimLib.
	// It bundles everything the OptimLib objective callback needs to evaluate
	// the negative log-likelihood of a REModelTemplate.
	template<typename T_mat, typename T_chol>
	class OptDataOptimLib {
	public:
		// Constructor: stores a non-owning pointer to the model and copies of the
		// remaining evaluation settings (see member documentation below).
		OptDataOptimLib(REModelTemplate<T_mat, T_chol>* re_model_templ,
			const double* fixed_effects,
			bool learn_covariance_parameters,
			vec_t& cov_pars) {
			re_model_templ_ = re_model_templ;
			fixed_effects_ = fixed_effects;
			learn_covariance_parameters_ = learn_covariance_parameters;
			cov_pars_ = cov_pars;
		}
		REModelTemplate<T_mat, T_chol>* re_model_templ_;//Non-owning pointer to the model whose likelihood is evaluated
		const double* fixed_effects_;//Externally provided fixed effects component of location parameter (only used for non-Gaussian data)
		bool learn_covariance_parameters_;//Indicates whether covariance parameters are optimized or not
		vec_t cov_pars_;//vector of covariance parameters (only used in case the covariance parameters are not estimated)
	};//end OptDataOptimLib class definition

	// Auxiliary function for optimization using OptimLib: evaluates the negative
	// log-likelihood at 'pars' for the model wrapped in 'opt_data'.
	// 'pars' contains (in this order) the log-transformed covariance parameters that
	// are optimized (if any) followed by the regression coefficients (if any).
	// The unnamed vec_t* is the gradient output slot of OptimLib's callback
	// signature; it is not used here (gradient-free evaluation).
	template<typename T_mat, typename T_chol>
	double EvalLLforOptimLib(const vec_t& pars, vec_t* /*grad_out, not used*/, void* opt_data) {
		OptDataOptimLib<T_mat, T_chol>* objfn_data = reinterpret_cast<OptDataOptimLib<T_mat, T_chol>*>(opt_data);
		REModelTemplate<T_mat, T_chol>* re_model_templ_ = objfn_data->re_model_templ_;
		double neg_log_likelihood;
		vec_t cov_pars, beta, fixed_effects_vec;
		const double* fixed_effects_ptr;
		bool gauss_likelihood = re_model_templ_->GetLikelihood() == "gaussian";
		bool has_covariates = re_model_templ_->HasCovariates();
		// Determine number of covariance and linear regression coefficient parameters
		// (for Gaussian data the nugget variance is profiled out, hence the "- 1")
		int num_cov_pars_optim, num_covariates;
		if (objfn_data->learn_covariance_parameters_) {
			if (gauss_likelihood) {
				num_cov_pars_optim = re_model_templ_->GetNumCovPar() - 1;
			}
			else {
				num_cov_pars_optim = re_model_templ_->GetNumCovPar();
			}
		}
		else {
			num_cov_pars_optim = 0;
		}
		if (has_covariates) {
			num_covariates = (int)pars.size() - num_cov_pars_optim;
		}
		else {
			num_covariates = 0;
		}
		// Extract covariance parameters and regression coefficients from pars vector
		if (has_covariates) {
			beta = pars.segment(num_cov_pars_optim, num_covariates);
			re_model_templ_->UpdateFixedEffects(beta, objfn_data->fixed_effects_, fixed_effects_vec);
			fixed_effects_ptr = fixed_effects_vec.data();
		}//end has_covariates
		else {//no covariates
			fixed_effects_ptr = objfn_data->fixed_effects_;
		}
		if (objfn_data->learn_covariance_parameters_) {
			if (gauss_likelihood) {
				cov_pars = vec_t(num_cov_pars_optim + 1);
				cov_pars[0] = 1.;//nugget effect
				cov_pars.segment(1, num_cov_pars_optim) = pars.segment(0, num_cov_pars_optim).array().exp().matrix();//back-transform to original scale
			}
			else {
				cov_pars = pars.segment(0, num_cov_pars_optim).array().exp().matrix();//back-transform to original scale
			}
		}
		else {
			cov_pars = objfn_data->cov_pars_;//covariance parameters are fixed, use the externally provided ones
		}
		// Calculate objective function
		if (gauss_likelihood) {
			if (objfn_data->learn_covariance_parameters_) {
				re_model_templ_->CalcCovFactorOrModeAndNegLL(cov_pars, fixed_effects_ptr);
				cov_pars[0] = re_model_templ_->ProfileOutSigma2();
				re_model_templ_->EvalNegLogLikelihoodOnlyUpdateNuggetVariance(cov_pars[0], neg_log_likelihood);
			}
			else {
				re_model_templ_->EvalNegLogLikelihoodOnlyUpdateFixedEffects(cov_pars.data(), neg_log_likelihood);
			}
		}//end gauss_likelihood_
		else {//non-Gaussian data
			re_model_templ_->CalcCovFactorOrModeAndNegLL(cov_pars, fixed_effects_ptr);
			neg_log_likelihood = re_model_templ_->GetNegLogLikelihood();
		}
		return neg_log_likelihood;
	}

	/*!
	* \brief Template class used in the wrapper class REModel
	*	The template parameters <T_mat, T_chol> can be either <den_mat_t, chol_den_mat_t> or <sp_mat_t, chol_sp_mat_t>
	*	depending on whether dense or sparse linear matrix algebra is used
	*/
	template<typename T_mat, typename T_chol>
	class REModelTemplate {
	public:
		/*! \brief Null constructor */
		REModelTemplate();
		/*!
		* \brief Constructor
		* \param num_data Number of data points
		* \param cluster_ids_data IDs / labels indicating independent realizations of random effects / Gaussian processes (same values = same process realization)
		* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
		* \param num_re_group Number of grouped (intercept) random effects
		* \param re_group_rand_coef_data Covariate data for grouped random coefficients
		* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
		* \param num_re_group_rand_coef Number of grouped random coefficients
		* \param num_gp Number of (intercept) Gaussian processes
		* \param gp_coords_data Coordinates (features) for Gaussian process
		* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
		* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
		* \param num_gp_rand_coef Number of Gaussian process random coefficients
		* \param cov_fct Type of covariance (kernel) function for Gaussian process. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance where we follow Rasmussen and Williams (2006)
		* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern and Wendland covariance. For the Wendland covariance function, we follow the notation of Bevilacqua et al. (2018)). This parameter is irrelevant for some covariance functions such as the exponential or Gaussian.
		* \param cov_fct_taper_range Range parameter of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018)
		* \param vecchia_approx If true, the Vecchia approximation is used for the Gaussian process
		* \param num_neighbors The number of neighbors used in the Vecchia approximation
		* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
		* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
		* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions
		* \param likelihood Likelihood function for the observed response variable. Default = "gaussian"
		*/
		REModelTemplate(data_size_t num_data, const data_size_t* cluster_ids_data, const char* re_group_data,
			data_size_t num_re_group, const double* re_group_rand_coef_data, const data_size_t* ind_effect_group_rand_coef,
			data_size_t num_re_group_rand_coef, data_size_t num_gp, const double* gp_coords_data,
			int dim_gp_coords, const double* gp_rand_coef_data, data_size_t num_gp_rand_coef,
			const char* cov_fct, double cov_fct_shape, double cov_fct_taper_range, bool vecchia_approx,
			int num_neighbors, const char* vecchia_ordering, const char* vecchia_pred_type,
			int num_neighbors_pred, const char* likelihood) {
			CHECK(num_data > 0);
			num_data_ = num_data;
			vecchia_approx_ = vecchia_approx;
			//Set up likelihood (nullptr means the default "gaussian")
			string_t likelihood_strg;
			if (likelihood == nullptr) {
				likelihood_strg = "gaussian";
			}
			else {
				likelihood_strg = std::string(likelihood);
			}
			gauss_likelihood_ = likelihood_strg == "gaussian";
			//Set up GP IDs
			SetUpGPIds(num_data_, cluster_ids_data, num_data_per_cluster_, data_indices_per_cluster_, unique_clusters_, num_clusters_);
			num_comps_total_ = 0;
			//Do some checks for grouped RE components and set meta data (number of components etc.)
			std::vector<std::vector<re_group_t>> re_group_levels;//Matrix with group levels for the grouped random effects (re_group_levels[j] contains the levels for RE number j)
			if (num_re_group > 0) {
				if (vecchia_approx) {
					Log::REFatal("The Veccia approximation cannot be used when there are grouped random effects (in the current implementation).");
				}
				num_re_group_ = num_re_group;
				CHECK(re_group_data != nullptr);
				if (num_re_group_rand_coef > 0) {
					num_re_group_rand_coef_ = num_re_group_rand_coef;
					CHECK(re_group_rand_coef_data != nullptr);
					CHECK(ind_effect_group_rand_coef != nullptr);
					//Indices into the base intercept effects are 1-based, see parameter documentation above
					for (int j = 0; j < num_re_group_rand_coef_; ++j) {
						CHECK(0 < ind_effect_group_rand_coef[j] && ind_effect_group_rand_coef[j] <= num_re_group_);
					}
					ind_effect_group_rand_coef_ = std::vector<int>(ind_effect_group_rand_coef, ind_effect_group_rand_coef + num_re_group_rand_coef_);
				}
				num_re_group_total_ = num_re_group_ + num_re_group_rand_coef_;
				num_comps_total_ += num_re_group_total_;
				// Convert characters in 'const char* re_group_data' to matrix (num_re_group_ x num_data_) with strings of group labels
				re_group_levels = std::vector<std::vector<re_group_t>>(num_re_group_, std::vector<re_group_t>(num_data_));
				if (num_re_group_ > 0) {
					ConvertCharToStringGroupLevels(num_data_, num_re_group_, re_group_data, re_group_levels);
				}
			}
			//Do some checks for GP components and set meta data (number of components etc.)
			if (num_gp > 0) {
				if (num_gp > 1) {
					Log::REFatal("num_gp can only be either 0 or 1 in the current implementation");
				}
				num_gp_ = num_gp;
				ind_intercept_gp_ = num_comps_total_;
				CHECK(dim_gp_coords > 0);
				CHECK(gp_coords_data != nullptr);
				CHECK(cov_fct != nullptr);
				dim_gp_coords_ = dim_gp_coords;
				cov_fct_ = std::string(cov_fct);
				cov_fct_shape_ = cov_fct_shape;
				cov_fct_taper_range_ = cov_fct_taper_range;
				if (vecchia_approx) {
					Log::REInfo("Starting nearest neighbor search for Vecchia approximation");
					CHECK(num_neighbors > 0);
					num_neighbors_ = num_neighbors;
					CHECK(num_neighbors_pred > 0);
					num_neighbors_pred_ = num_neighbors_pred;
					//nullptr means the default ordering "none"
					if (vecchia_ordering == nullptr) {
						vecchia_ordering_ = "none";
					}
					else {
						vecchia_ordering_ = std::string(vecchia_ordering);
						CHECK(vecchia_ordering_ == "none" || vecchia_ordering_ == "random");
						if (SUPPORTED_VECCHIA_ORDERING_.find(vecchia_ordering_) == SUPPORTED_VECCHIA_ORDERING_.end()) {
							Log::REFatal("Ordering of type '%s' is not supported for the Veccia approximation.", vecchia_ordering_.c_str());
						}
					}
					//nullptr means the default prediction type "order_obs_first_cond_obs_only"
					if (vecchia_pred_type == nullptr) {
						vecchia_pred_type_ = "order_obs_first_cond_obs_only";
					}
					else {
						vecchia_pred_type_ = std::string(vecchia_pred_type);
						if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) {
							Log::REFatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_.c_str());
						}
					}
				}
				if (num_gp_rand_coef > 0) {//Random slopes
					CHECK(gp_rand_coef_data != nullptr);
					num_gp_rand_coef_ = num_gp_rand_coef;
				}
				num_gp_total_ = num_gp_ + num_gp_rand_coef_;
				num_comps_total_ += num_gp_total_;
				if (vecchia_approx) {
					//Rough estimate of the memory needed for the Vecchia approximation (in mb); warn if it is large
					double num_mem_d = ((double)num_gp_total_) * ((double)num_data_) * ((double)num_neighbors_) * ((double)num_neighbors_);
					int mem_size = (int)(num_mem_d * 8. / 1000000.);
					if (mem_size > 8000) {
						Log::REWarning("The current implementation of the Vecchia approximation is not optimized for memory usage. In your case (num. obs. = %d and num. neighbors = %d), at least approximately %d mb of memory is needed. If this is a problem, contact the developer of this package and ask to implement this feature.", num_data_, num_neighbors_, mem_size);
					}
				}
			}
			DetermineSpecialCasesModelsEstimationPrediction();
			//Create RE/GP component models (one set per independent cluster / realization)
			for (const auto& cluster_i : unique_clusters_) {
				std::vector<std::shared_ptr<RECompBase<T_mat>>> re_comps_cluster_i;
				if (vecchia_approx_) {
					std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
					std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
					std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
					std::vector<Triplet_t> entries_init_B_cluster_i;
					std::vector<Triplet_t> entries_init_B_grad_cluster_i;
					std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
					CreateREComponentsVecchia(num_data_, data_indices_per_cluster_, cluster_i, num_data_per_cluster_, gp_coords_data, dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, cov_fct_taper_range_, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, vecchia_ordering_, num_neighbors_);
					nearest_neighbors_.insert({ cluster_i, nearest_neighbors_cluster_i });
					dist_obs_neighbors_.insert({ cluster_i, dist_obs_neighbors_cluster_i });
					dist_between_neighbors_.insert({ cluster_i, dist_between_neighbors_cluster_i });
					entries_init_B_.insert({ cluster_i, entries_init_B_cluster_i });
					entries_init_B_grad_.insert({ cluster_i, entries_init_B_grad_cluster_i });
					z_outer_z_obs_neighbors_.insert({ cluster_i, z_outer_z_obs_neighbors_cluster_i });
				}//end vecchia_approx_
				else {//not vecchia_approx_
					CreateREComponents(num_data_, num_re_group_, data_indices_per_cluster_, cluster_i, re_group_levels, num_data_per_cluster_, num_re_group_rand_coef_, re_group_rand_coef_data, ind_effect_group_rand_coef_, num_gp_, gp_coords_data, dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, cov_fct_taper_range_, ind_intercept_gp_, !only_grouped_REs_use_woodbury_identity_, re_comps_cluster_i);
				}//end not vecchia_approx_
				re_comps_.insert({ cluster_i, re_comps_cluster_i });
			}//end loop over clusters
			//Create matrices Z and ZtZ if Woodbury identity is used (used only if there are only grouped REs and no GPs)
			if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
				InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity();
			}
			InitializeIdentityMatricesForGaussianData();
			if (vecchia_approx_) {
				Log::REInfo("Nearest neighbors for Vecchia approximation found");
			}
			CheckCompatibilitySpecialOptions();
			InitializeLikelihoods(likelihood_strg);
			DetermineCovarianceParameterIndicesNumCovPars();
		}//end REModelTemplate

		/*! \brief Destructor */
		~REModelTemplate() {
		}

		/*! \brief Disable copy */
		REModelTemplate& operator=(const REModelTemplate&) = delete;

		/*! \brief Disable copy */
		REModelTemplate(const REModelTemplate&) = delete;

		/*!
		* \brief Returns the type of likelihood
		*/
		string_t GetLikelihood() {
			// All clusters share the same likelihood, so querying the first one suffices
			return(likelihood_[unique_clusters_[0]]->GetLikelihood());
		}

		/*!
		* \brief Set / change the type of likelihood
		* \param likelihood Likelihood name
		*/
		void SetLikelihood(const string_t& likelihood) {
			//Remember the previous configuration in order to (de)allocate only what changed
			bool gauss_likelihood_before = gauss_likelihood_;
			bool only_one_grouped_RE_calculations_on_RE_scale_before = only_one_grouped_RE_calculations_on_RE_scale_;
			bool only_grouped_REs_use_woodbury_identity_before = only_grouped_REs_use_woodbury_identity_;
			gauss_likelihood_ = likelihood == "gaussian";
			DetermineSpecialCasesModelsEstimationPrediction();
			CheckCompatibilitySpecialOptions();
			//Make adaptions in re_comps_ for special options when switching between Gaussian and non-Gaussian likelihoods
			if (gauss_likelihood_before && !gauss_likelihood_) {
				if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
					for (const auto& cluster_i : unique_clusters_) {
						re_comps_[cluster_i][0]->DropZ();
					}
				}
			}
			else if (!gauss_likelihood_before && gauss_likelihood_) {
				if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
					for (const auto& cluster_i : unique_clusters_) {
						re_comps_[cluster_i][0]->AddZ();
					}
				}
			}
			//Matrices used when only_grouped_REs_use_woodbury_identity_==true
			if ((only_grouped_REs_use_woodbury_identity_ && !only_grouped_REs_use_woodbury_identity_before) ||
				(only_grouped_REs_use_woodbury_identity_ && only_one_grouped_RE_calculations_on_RE_scale_before && !only_one_grouped_RE_calculations_on_RE_scale_)) {
				InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity();
			}
			else if (!only_grouped_REs_use_woodbury_identity_) {
				//Delete not required matrices (assigning empty maps releases the storage)
				Zt_ = std::map<data_size_t, sp_mat_t>();
				P_Zt_ = std::map<data_size_t, sp_mat_t>();
				ZtZ_ = std::map<data_size_t, sp_mat_t>();
				cum_num_rand_eff_ = std::map<data_size_t, std::vector<data_size_t>>();
				Zj_square_sum_ = std::map<data_size_t, std::vector<double>>();
				ZtZj_ = std::map<data_size_t, std::vector<sp_mat_t>>();
				P_ZtZj_ = std::map<data_size_t, std::vector<sp_mat_t>>();
			}
			//Identity matrices for Gaussian data
			if (!gauss_likelihood_before && gauss_likelihood_) {
				InitializeIdentityMatricesForGaussianData();
			}
			else if (gauss_likelihood_before && !gauss_likelihood_) {
				//Delete not required matrices
				Id_ = std::map<data_size_t, T_mat>();
				P_Id_ = std::map<data_size_t, T_mat>();
				//Id_cs_ = std::map<data_size_t, cs>();//currently not used
			}
			InitializeLikelihoods(likelihood);
			DetermineCovarianceParameterIndicesNumCovPars();
		}

		/*!
		* \brief Find linear regression coefficients and covariance parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
		*	Note: You should pre-allocate memory for optim_cov_pars and optim_coef. Their length equal the number of covariance parameters and the number of regression coefficients
		*	If calc_std_dev=true, you also need to pre-allocate memory for std_dev_cov_par and std_dev_coef of the same length for the standard deviations
		* \param y_data Response variable data
		* \param covariate_data Covariate data (=independent variables, features). Set to nullptr if there is no covariate data
		* \param num_covariates Number of covariates
		* \param[out] optim_cov_pars Optimal covariance parameters
		* \param[out] optim_coef Optimal regression coefficients
		* \param[out] num_it Number of iterations
		* \param init_cov_pars Initial values for covariance parameters of RE components
		* \param init_coef Initial values for the regression coefficients (can be nullptr)
		* \param lr_coef Learning rate for fixed-effect linear coefficients
		* \param lr_cov Learning rate for covariance parameters. If lr<= 0, default values are used. Default value = 0.1 for "gradient_descent" and 1.
for "fisher_scoring" * \param acc_rate_coef Acceleration rate for coefficients for Nesterov acceleration (only relevant if nesterov_schedule_version == 0) (default = 0.5) * \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0) (default = 0.5) * \param momentum_offset Number of iterations for which no mometum is applied in the beginning (default = 2) * \param max_iter Maximal number of iterations (default = 1000) * \param delta_rel_conv Convergence criterion: stop iteration if relative change in in parameters is below this value (default = 1.0e-6) * \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true, only used for "gradient_descent" * \param nesterov_schedule_version Which version of Nesterov schedule should be used (default = 0) * \param optimizer_cov Optimizer for covariance parameters. Options: "gradient_descent" or "fisher_scoring" (=default) * \param optimizer_coef Optimizer for coefficients. Options: "gradient_descent" or "wls" (coordinate descent using weighted least squares, =default) * \param[out] std_dev_cov_par Standard deviations for the covariance parameters (default = nullptr) * \param[out] std_dev_coef Standard deviations for the coefficients (default = nullptr) * \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information (default = false) * \param convergence_criterion The convergence criterion used for terminating the optimization algorithm. 
Options: "relative_change_in_log_likelihood" (default) or "relative_change_in_parameters"
* \param fixed_effects Externally provided fixed effects component of location parameter (can be nullptr, only used for non-Gaussian data)
* \param learn_covariance_parameters If true, covariance parameters are estimated (default = true)
*/
void OptimLinRegrCoefCovPar(const double* y_data,
	const double* covariate_data,
	int num_covariates,
	double* optim_cov_pars,
	double* optim_coef,
	int& num_it,
	double* init_cov_pars,
	double* init_coef,
	double lr_coef,
	double lr_cov,
	double acc_rate_coef,
	double acc_rate_cov,
	int momentum_offset,
	int max_iter,
	double delta_rel_conv,
	bool use_nesterov_acc,
	int nesterov_schedule_version,
	string_t optimizer_cov,
	string_t optimizer_coef,
	double* std_dev_cov_par,
	double* std_dev_coef,
	bool calc_std_dev,
	string_t convergence_criterion,
	const double* fixed_effects,
	bool learn_covariance_parameters) {
	//std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();// Only for debugging
	// Some checks: validate optimizer / convergence-criterion choices before touching any state
	if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer_cov) == SUPPORTED_OPTIM_COV_PAR_.end()) {
		Log::REFatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer_cov.c_str());
	}
	if (SUPPORTED_CONV_CRIT_.find(convergence_criterion) == SUPPORTED_CONV_CRIT_.end()) {
		Log::REFatal("Convergence criterion '%s' is not supported.", convergence_criterion.c_str());
	}
	if (!gauss_likelihood_) {
		// Fisher scoring is only implemented for the Gaussian likelihood
		if (optimizer_cov == "fisher_scoring") {
			Log::REFatal("Optimizer option '%s' is not supported for covariance parameters for non-Gaussian data. ", optimizer_cov.c_str());
		}
	}
	if (covariate_data != nullptr) {
		if (SUPPORTED_OPTIM_COEF_.find(optimizer_coef) == SUPPORTED_OPTIM_COEF_.end()) {
			Log::REFatal("Optimizer option '%s' is not supported for regression coefficients.", optimizer_coef.c_str());
		}
		if (!gauss_likelihood_ && optimizer_coef == "wls") {
			Log::REFatal("Optimizer option '%s' is not supported for linear regression coefficients for non-Gaussian data.", optimizer_coef.c_str());
		}
	}
	if (gauss_likelihood_ && fixed_effects != nullptr) {
		Log::REFatal("Additional external fixed effects in 'fixed_effects' can currently only be used for non-Gaussian data");
	}
	// Initialization of variables
	if (covariate_data == nullptr) {
		has_covariates_ = false;
	}
	else {
		has_covariates_ = true;
	}
	bool use_nesterov_acc_coef = use_nesterov_acc;
	if (optimizer_cov != "gradient_descent") {
		use_nesterov_acc = false;//Nesterov acceleration is only used for gradient descent, not for Fisher scoring
	}
	if (optimizer_coef != "gradient_descent") {
		use_nesterov_acc_coef = false;//Nesterov acceleration is only used for gradient descent, not for Fisher scoring
	}
	bool terminate_optim = false;
	num_it = max_iter;// overwritten with the actual iteration count if the loop terminates early
	bool profile_out_marginal_variance = (optimizer_cov == "gradient_descent" && gauss_likelihood_);
	// Profiling out sigma (=use closed-form expression for error / nugget variance) is better for gradient descent for Gaussian data
	// (the parameters usually live on different scales and the nugget needs a small learning rate but the others not...)
	bool has_intercept = false;
	bool only_intercept_for_GPBoost_algo = false;
	int intercept_col = -1;
	// Check whether one of the columns contains only 1's and if not, make warning
	if (has_covariates_) {
		num_coef_ = num_covariates;
		X_ = Eigen::Map<const den_mat_t>(covariate_data, num_data_, num_coef_);
		vec_t vec_ones(num_data_);
		vec_ones.setOnes();
		for (int icol = 0; icol < num_coef_; ++icol) {
			// a column is treated as the intercept if it deviates from all-ones by less than 0.001 in total
			if ((X_.col(icol) - vec_ones).cwiseAbs().sum() < 0.001) {
				has_intercept = true;
				intercept_col = icol;
				break;
			}
		}
		if (!has_intercept) {
			Log::REWarning("The covariate data contains no column of ones, i.e., no intercept is included.");
		}
		only_intercept_for_GPBoost_algo = has_intercept && num_coef_ == 1 && !learn_covariance_parameters;
	}
	// Assume that this function is only called for initialization of the GPBoost algorithm
	// when (i) there is only an intercept (and not other covariates) and (ii) the covariance parameters are not learned
	const double* fixed_effects_ptr = fixed_effects;
	// Initialization of covariance parameters related variables
	if (lr_cov < 0.) {//a value below 0 indicates that the default values should be used
		if (optimizer_cov == "fisher_scoring") {
			lr_cov = 1.;
		}
		else if (optimizer_cov == "gradient_descent") {
			lr_cov = 0.1;
		}
	}
	vec_t cov_pars = Eigen::Map<const vec_t>(init_cov_pars, num_cov_par_);
	vec_t cov_pars_lag1 = vec_t(num_cov_par_);//used only if convergence_criterion == "relative_change_in_parameters"
	vec_t cov_pars_after_grad_aux;//auxiliary variable used only if use_nesterov_acc == true
	vec_t cov_pars_after_grad_aux_lag1 = cov_pars;//auxiliary variable used only if use_nesterov_acc == true
	// Set response variable data (if needed)
	if ((!has_covariates_ || !gauss_likelihood_) && y_data != nullptr) {
		SetY(y_data);
	}
	if (!has_covariates_ || !gauss_likelihood_) {
		CHECK(y_has_been_set_);//response variable data needs to have been set at this point for non-Gaussian data and for Gaussian data without covariates
	}
	if (gauss_likelihood_) {
		CHECK(y_data != nullptr);
		// Copy of response data (used only for Gaussian data and if there are also linear covariates since then y_ is modified during the optimization algorithm and this contains the original data)
		y_vec_ = Eigen::Map<const vec_t>(y_data, num_data_);
	}
	// Initialization of linear regression coefficients related variables
	vec_t beta, beta_lag1, beta_after_grad_aux, beta_after_grad_aux_lag1, fixed_effects_vec, loc_transf, scale_transf;
	bool scale_covariables = false;
	if (has_covariates_) {
		scale_covariables = (optimizer_coef == "gradient_descent") && !only_intercept_for_GPBoost_algo;
		// Scale covariates (in order that the gradient is less sample-size dependent)
		if (scale_covariables) {
			X_orig_ = X_;// keep the unscaled design matrix; restored (and coefficients back-transformed) after optimization
			loc_transf = vec_t(num_coef_);
			scale_transf = vec_t(num_coef_);
			vec_t col_i_centered;
			for (int icol = 0; icol < num_coef_; ++icol) {
				if (!has_intercept || icol != intercept_col) {
					loc_transf[icol] = X_.col(icol).mean();
					col_i_centered = X_.col(icol);
					col_i_centered.array() -= loc_transf[icol];
					scale_transf[icol] = std::sqrt(col_i_centered.array().square().sum());
					X_.col(icol) = col_i_centered / scale_transf[icol];
				}
			}
			if (has_intercept) {
				if (gauss_likelihood_) {
					scale_transf[intercept_col] = 1.;
				}
				else {
					// Scale also the intercept in order that the different gradient components are approximately of the same magnitude
					// For Gaussian data, this is not done as it seems that this is not advantageous
					scale_transf[intercept_col] = num_data_;
					X_.col(intercept_col).array() = 1. / scale_transf[intercept_col];
				}
			}
		}
		beta = vec_t(num_covariates);
		if (init_coef == nullptr) {
			beta.setZero();
		}
		else {
			beta = Eigen::Map<const vec_t>(init_coef, num_covariates);
		}
		beta_after_grad_aux_lag1 = beta;
		UpdateFixedEffects(beta, fixed_effects, fixed_effects_vec);
		if (!gauss_likelihood_) {
			fixed_effects_ptr = fixed_effects_vec.data();
		}
	}//end if has_covariates_
	Log::REDebug("Initial covariance parameters");
	for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::REDebug("cov_pars[%d]: %g", i, cov_pars[i]); }
	if (has_covariates_) {
		Log::REDebug("Initial linear regression coefficients");
		for (int i = 0; i < std::min((int)beta.size(), 3); ++i) { Log::REDebug("beta[%d]: %g", i, beta[i]); }
	}
	// Initialize optimizer:
	// - factorize the covariance matrix (Gaussian data) or calculate the posterior mode of the random effects for use in the Laplace approximation (non-Gaussian data)
	// - calculate initial value of objective function
	CalcCovFactorOrModeAndNegLL(cov_pars, fixed_effects_ptr);
	// TODO: for likelihood evaluation we don't need y_aux = Psi^-1 * y but only Psi^-0.5 * y. So, if has_covariates_==true, we might skip this step here and save some time
	if (std::isnan(neg_log_likelihood_) || std::isinf(neg_log_likelihood_)) {
		if (gauss_likelihood_) {
			Log::REFatal("NaN or Inf occurred in negative log-likelihood for intial parameters. You might try providing other initial values.");
		}
		else {
			Log::REFatal("NaN or Inf occurred in approximate negative marginal log-likelihood for intial parameters. You might try providing other initial values.");
		}
	}
	if (gauss_likelihood_) {
		Log::REDebug("Initial negative log-likelihood: %g", neg_log_likelihood_);
	}
	else {
		Log::REDebug("Initial approximate negative marginal log-likelihood: %g", neg_log_likelihood_);
	}
	if (optimizer_cov == "nelder_mead") {
		// Derivative-free optimization is delegated entirely to an external routine
		OptimExternal(cov_pars, beta, fixed_effects, max_iter, delta_rel_conv, num_it, learn_covariance_parameters);
	}
	else {
		// Start optimization
		for (int it = 0; it < max_iter; ++it) {
			neg_log_likelihood_lag1_ = neg_log_likelihood_;
			cov_pars_lag1 = cov_pars;
			// Update linear regression coefficients using gradient descent or generalized least squares (the latter option only for Gaussian data)
			if (has_covariates_) {
				beta_lag1 = beta;
				if (optimizer_coef == "gradient_descent") {// one step of gradient descent
					vec_t grad_beta;
					// Calculate gradient for linear regression coefficients
					CalcLinCoefGrad(cov_pars[0], beta, grad_beta, fixed_effects_ptr);
					// Update linear regression coefficients, apply step size safeguard, and recalculate mode for Laplace approx. (only for non-Gaussian data)
					UpdateLinCoef(beta, grad_beta, lr_coef, cov_pars, use_nesterov_acc_coef, it, beta_after_grad_aux, beta_after_grad_aux_lag1, acc_rate_coef, nesterov_schedule_version, momentum_offset, fixed_effects, fixed_effects_vec);
					fixed_effects_ptr = fixed_effects_vec.data();
				}
				else if (optimizer_coef == "wls") {// coordinate descent using generalized least squares (only for Gaussian data)
					CHECK(gauss_likelihood_);
					SetY(y_vec_.data());
					CalcYAux();
					UpdateCoefGLS(X_, beta);
					// Set resid for updating covariance parameters
					vec_t resid = y_vec_ - (X_ * beta);
					SetY(resid.data());
					EvalNegLogLikelihoodOnlyUpdateFixedEffects(cov_pars.data(), neg_log_likelihood_after_lin_coef_update_);
				}
			}//end has_covariates_
			else {
				neg_log_likelihood_after_lin_coef_update_ = neg_log_likelihood_lag1_;
			}
			// end update regression coefficients
			// Update covariance parameters using one step of gradient descent or Fisher scoring
			if (learn_covariance_parameters) {
				// Calculate gradient or natural gradient = FI^-1 * grad (for Fisher scoring)
				vec_t nat_grad;// nat_grad = grad for gradient descent and nat_grad = FI^-1 * grad for Fisher scoring (="natural" gradient)
				if (optimizer_cov == "gradient_descent") {//gradient descent
					if (gauss_likelihood_) {
						// Profile out sigma2 (=use closed-form expression for error / nugget variance) since this is better for gradient descent (the parameters usually live on different scales and the nugget needs a small learning rate but the others not...)
						cov_pars[0] = yTPsiInvy_ / num_data_;
					}
					CalcCovParGrad(cov_pars, nat_grad, false, false, fixed_effects_ptr);
				}
				else if (optimizer_cov == "fisher_scoring") {//Fisher scoring
					// We don't profile out sigma2 (=don't use closed-form expression for error / nugget variance) since this is better for Fisher scoring (otherwise much more iterations are needed)
					vec_t grad;
					den_mat_t FI;
					CalcCovParGrad(cov_pars, grad, true, true, fixed_effects_ptr);
					CalcFisherInformation(cov_pars, FI, true, true, true);
					nat_grad = FI.llt().solve(grad);// Cholesky solve of FI * nat_grad = grad
				}
				// Update covariance parameters, apply step size safeguard, factorize covariance matrix, and calculate new value of objective function
				UpdateCovPars(cov_pars, nat_grad, lr_cov, profile_out_marginal_variance, use_nesterov_acc, it, optimizer_cov, cov_pars_after_grad_aux, cov_pars_after_grad_aux_lag1, acc_rate_cov, nesterov_schedule_version, momentum_offset, fixed_effects_ptr);
				// Check for NA or Inf
				if (std::isnan(cov_pars[0]) || std::isinf(cov_pars[0])) {
					Log::REFatal("NaN or Inf occurred in covariance parameter optimization. If this is a problem, consider doing the following. If you have used Fisher scoring, try using gradient descent. If you have used gradient descent, consider using a smaller learning rate.");
				}
			}
			else {
				neg_log_likelihood_ = neg_log_likelihood_after_lin_coef_update_;
			}
			// end update covariance parameters
			// Check convergence
			bool likelihood_is_na = std::isnan(neg_log_likelihood_) || std::isinf(neg_log_likelihood_);//if the likelihood is NA, we monitor the parameters instead of the likelihood
			if (convergence_criterion == "relative_change_in_parameters" || likelihood_is_na) {
				if (has_covariates_) {
					if (((beta - beta_lag1).norm() < delta_rel_conv * beta_lag1.norm()) && ((cov_pars - cov_pars_lag1).norm() < delta_rel_conv * cov_pars_lag1.norm())) {
						terminate_optim = true;
					}
				}
				else {
					if ((cov_pars - cov_pars_lag1).norm() < delta_rel_conv * cov_pars_lag1.norm()) {
						terminate_optim = true;
					}
				}
			}
			else if (convergence_criterion == "relative_change_in_log_likelihood") {
				if ((neg_log_likelihood_lag1_ - neg_log_likelihood_) < delta_rel_conv * std::abs(neg_log_likelihood_lag1_)) {
					terminate_optim = true;
				}
			}
			// Output for debugging (log every iteration up to 10, then with decreasing frequency; the final iteration is logged separately below)
			if ((it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) && (it != (max_iter - 1))) {
				Log::REDebug("GPModel parameter optimization iteration number %d", it + 1);
				for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::REDebug("cov_pars[%d]: %g", i, cov_pars[i]); }
				for (int i = 0; i < std::min((int)beta.size(), 5); ++i) { Log::REDebug("beta[%d]: %g", i, beta[i]); }
				if (has_covariates_ && beta.size() > 5) {
					Log::REDebug("Note: only the first 5 linear regression coefficients are shown");
				}
				if (gauss_likelihood_) {
					Log::REDebug("Negative log-likelihood: %g", neg_log_likelihood_);
				}
				else {
					Log::REDebug("Approximate negative marginal log-likelihood: %g", neg_log_likelihood_);
				}
			}
			// Check whether to terminate
			if (terminate_optim) {
				num_it = it + 1;
				break;
			}
		}//end for loop for optimization
	}
	if (num_it == max_iter) {
		Log::REDebug("GPModel: no convergence after the maximal number of iterations");
	}
	else {
		Log::REDebug("GPModel parameter estimation finished after %d iteration", num_it);
	}
	for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::REDebug("cov_pars[%d]: %g", i, cov_pars[i]); }
	for (int i = 0; i < std::min((int)beta.size(), 5); ++i) { Log::REDebug("beta[%d]: %g", i, beta[i]); }
	if (gauss_likelihood_) {
		Log::REDebug("Negative log-likelihood: %g", neg_log_likelihood_);
	}
	else {
		Log::REDebug("Approximate negative marginal log-likelihood: %g", neg_log_likelihood_);
	}
	// Write out results
	for (int i = 0; i < num_cov_par_; ++i) {
		optim_cov_pars[i] = cov_pars[i];
	}
	if (has_covariates_) {
		if (scale_covariables) {
			// transform coefficients back to original scale
			if (has_intercept && !gauss_likelihood_) {
				beta[intercept_col] /= scale_transf[intercept_col];
			}
			for (int icol = 0; icol < num_coef_; ++icol) {
				if (!has_intercept || icol != intercept_col) {
					beta[icol] /= scale_transf[icol];
					if (has_intercept) {
						beta[intercept_col] -= beta[icol] * loc_transf[icol];
					}
				}
			}
			X_ = X_orig_;// restore the unscaled design matrix
		}
		for (int i = 0; i < num_covariates; ++i) {
			optim_coef[i] = beta[i];
		}
	}
	if (calc_std_dev) {
		vec_t std_dev_cov(num_cov_par_);
		if (gauss_likelihood_) {
			CalcStdDevCovPar(cov_pars, std_dev_cov);//TODO: maybe another call to CalcCovFactor can be avoided in CalcStdDevCovPar (need to take care of cov_pars[0])
			for (int i = 0; i < num_cov_par_; ++i) {
				std_dev_cov_par[i] = std_dev_cov[i];
			}
		}
		else {
			std_dev_cov.setZero();// Calculation of standard deviations for covariance parameters is not supported for non-Gaussian data
			if (!has_covariates_) {
				Log::REWarning("Calculation of standard deviations of covariance parameters for non-Gaussian data is (currently) not supported.");
			}
		}
		if (has_covariates_) {
			vec_t std_dev_beta(num_covariates);
			if (gauss_likelihood_) {
				CalcStdDevCoef(cov_pars, X_, std_dev_beta);
			}
			else {
				Log::REWarning("Standard deviations of linear regression coefficients for non-Gaussian data can be very approximative. The only reason for reporting them is that other software packages for generalized linear mixed effects are also doing this.");
				CalcStdDevCoefNonGaussian(num_covariates, beta, cov_pars, fixed_effects, std_dev_beta);
			}
			for (int i = 0; i < num_covariates; ++i) {
				std_dev_coef[i] = std_dev_beta[i];
			}
		}
	}
	if (has_covariates_) {
		if (only_intercept_for_GPBoost_algo) {
			has_covariates_ = false;
			// When this function is only called for initialization of the GPBoost algorithm,
			// we set has_covariates_ to false in order to avoid potential problems when making predictions with the GPBoostOOS algorithm,
			// since in the second phase of the GPBoostOOS algorithm covariance parameters are not estimated (and thus has_covariates_ is not set to false)
			// but this function is called for initialization of the GPBoost algorithm.
		}
	}
	//std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();// Only for debugging
	//double el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count()) / 1000000.;// Only for debugging
	//Log::REInfo("Time for optimization: %g", el_time);// Only for debugging
}//end OptimLinRegrCoefCovPar
/*!
* \brief Profile out sigma2 (=use closed-form expression for error / nugget variance) (only used in EvalLLforOptimLib)
* \return sigma2_
*/
double ProfileOutSigma2() {
	sigma2_ = yTPsiInvy_ / num_data_;
	return sigma2_;
}
/*!
* \brief Return value of neg_log_likelihood_ (only used in EvalLLforOptimLib)
* \return neg_log_likelihood_
*/
double GetNegLogLikelihood() {
	return neg_log_likelihood_;
}
/*!
* \brief Return num_cov_par_ (only used in EvalLLforOptimLib)
* \return num_cov_par_
*/
int GetNumCovPar() {
	return num_cov_par_;
}
/*!
* \brief Return has_covariates_ (only used in EvalLLforOptimLib)
* \return has_covariates_
*/
bool HasCovariates() {
	return has_covariates_;
}
/*!
* \brief Factorize the covariance matrix (Gaussian data) or * calculate the posterior mode of the random effects for use in the Laplace approximation (non-Gaussian data) * And calculate the negative log-likelihood (Gaussian data) or the negative approx. marginal log-likelihood (non-Gaussian data) * \param cov_pars Covariance parameters * \param fixed_effects Fixed effects component of location parameter */ void CalcCovFactorOrModeAndNegLL(const vec_t& cov_pars, const double* fixed_effects) { SetCovParsComps(cov_pars); if (gauss_likelihood_) { CalcCovFactor(vecchia_approx_, true, 1., false);//Create covariance matrix and factorize it (and also calculate derivatives if Vecchia approximation is used) if (only_grouped_REs_use_woodbury_identity_) { CalcYtilde<T_mat>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) } else { CalcYAux();//y_aux = Psi^-1 * y } EvalNegLogLikelihood(nullptr, cov_pars.data(), neg_log_likelihood_, true, true, true); }//end gauss_likelihood_ else {//not gauss_likelihood_ if (vecchia_approx_) { CalcCovFactor(true, true, 1., false); } else { CalcSigmaComps(); CalcCovMatrixNonGauss(); } neg_log_likelihood_ = -CalcModePostRandEff(fixed_effects);//calculate mode and approximate marginal likelihood }//end not gauss_likelihood_ }//end CalcCovFactorOrModeAndNegLL /*! 
* \brief Update fixed effects with new linear regression coefficients
* \param beta Linear regression coefficients
* \param fixed_effects Externally provided fixed effects component of location parameter (only used for non-Gaussian data)
* \param fixed_effects_vec[out] Vector of fixed effects (used only for non-Gaussian data)
*/
void UpdateFixedEffects(const vec_t& beta, const double* fixed_effects, vec_t& fixed_effects_vec) {
	if (gauss_likelihood_) {
		// For Gaussian data the linear predictor is absorbed into the response: y_ <- y - X * beta
		vec_t resid = y_vec_ - (X_ * beta);
		SetY(resid.data());
	}
	else {
		fixed_effects_vec = X_ * beta;
		if (fixed_effects != nullptr) {//add external fixed effects to linear predictor
#pragma omp parallel for schedule(static)
			for (int i = 0; i < num_data_; ++i) {
				fixed_effects_vec[i] += fixed_effects[i];
			}
		}
	}
}
/*!
* \brief Calculate the value of the negative log-likelihood
* \param y_data Response variable data
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Negative log-likelihood
* \param CalcCovFactor_already_done If true, it is assumed that the covariance matrix has already been factorized
* \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant if not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
void EvalNegLogLikelihood(const double* y_data, const double* cov_pars, double& negll,
	bool CalcCovFactor_already_done, bool CalcYAux_already_done, bool CalcYtilde_already_done) {
	CHECK(!(CalcYAux_already_done && !CalcCovFactor_already_done));// CalcYAux_already_done && !CalcCovFactor_already_done makes no sense
	if (y_data != nullptr) {
		SetY(y_data);
	}
	if (!CalcCovFactor_already_done) {
		const vec_t cov_pars_vec = Eigen::Map<const vec_t>(cov_pars, num_cov_par_);
		SetCovParsComps(cov_pars_vec);
		CalcCovFactor(false, true, 1., false);//Create covariance matrix and factorize it
	}
	//Calculate quadratic form y^T Psi^-1 y
	CalcYTPsiIInvY<T_mat>(yTPsiInvy_, true, 1, CalcYAux_already_done, CalcYtilde_already_done);
	//Calculate log determinant (accumulated over independent realizations / clusters)
	log_det_Psi_ = 0;
	for (const auto& cluster_i : unique_clusters_) {
		if (vecchia_approx_) {
			// Vecchia: log|Psi| = -sum(log(diag(D^-1))) from the B^T D^-1 B factorization
			log_det_Psi_ -= D_inv_[cluster_i].diagonal().array().log().sum();
		}
		else {
			if (only_grouped_REs_use_woodbury_identity_) {
				// Woodbury: log|Psi| = 2*log|chol(Sigma^-1 + Z^T Z)| + sum over components of n_j * log(sigma_j^2)
				if (num_re_group_total_ == 1 && num_comps_total_ == 1) {
					// single grouped RE: the Cholesky factor is diagonal and stored as a vector of square roots
					log_det_Psi_ += (2. * sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().log().sum());
				}
				else {
					log_det_Psi_ += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
				}
				for (int j = 0; j < num_comps_total_; ++j) {
					int num_rand_eff = cum_num_rand_eff_[cluster_i][j + 1] - cum_num_rand_eff_[cluster_i][j];
					log_det_Psi_ += (num_rand_eff * std::log(re_comps_[cluster_i][j]->cov_pars_[0]));
				}
			}
			else {
				// dense / standard case: log|Psi| = 2*sum(log(diag(chol(Psi))))
				log_det_Psi_ += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
			}
		}
	}
	// -log p(y) = y^T Psi^-1 y / (2 sigma2) + log|Psi| / 2 + n/2 * (log(sigma2) + log(2 pi))
	negll = yTPsiInvy_ / 2. / cov_pars[0] + log_det_Psi_ / 2. + num_data_ / 2. * (std::log(cov_pars[0]) + std::log(2 * M_PI));
}//end EvalNegLogLikelihood
/*!
* \brief Calculate the value of the negative log-likelihood when yTPsiInvy_ and log_det_Psi_ is already known
* \param sigma2 Nugget / error term variance
* \param[out] negll Negative log-likelihood
*/
void EvalNegLogLikelihoodOnlyUpdateNuggetVariance(const double sigma2, double& negll) {
	// Same formula as in EvalNegLogLikelihood, reusing the cached yTPsiInvy_ and log_det_Psi_
	negll = yTPsiInvy_ / 2. / sigma2 + log_det_Psi_ / 2. + num_data_ / 2. * (std::log(sigma2) + std::log(2 * M_PI));
}//end EvalNegLogLikelihoodOnlyUpdateNuggetVariance
/*!
* \brief Calculate the value of the negative log-likelihood when only the fixed effects part has changed and the covariance matrix has not changed
* Note: It is assumed that y_ has been set before by calling 'SetY' with the residuals = y - fixed_effects
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Negative log-likelihood
*/
void EvalNegLogLikelihoodOnlyUpdateFixedEffects(const double* cov_pars, double& negll) {
	// Calculate y_aux = Psi^-1 * y (if not only_grouped_REs_use_woodbury_identity_) or y_tilde and y_tilde2 (if only_grouped_REs_use_woodbury_identity_) for covariance parameter update (only for Gaussian data)
	if (only_grouped_REs_use_woodbury_identity_) {
		CalcYtilde<T_mat>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
	}
	else {
		CalcYAux();//y_aux = Psi^-1 * y
	}
	//Calculate quadratic form y^T Psi^-1 y
	CalcYTPsiIInvY<T_mat>(yTPsiInvy_, true, 1, false, false);
	// log_det_Psi_ is reused from the last factorization since the covariance matrix is unchanged
	negll = yTPsiInvy_ / 2. / cov_pars[0] + log_det_Psi_ / 2. + num_data_ / 2. * (std::log(cov_pars[0]) + std::log(2 * M_PI));
}
/*!
* \brief Calculate the value of the approximate negative marginal log-likelihood obtained when using the Laplace approximation
* \param y_data Response variable data
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Approximate negative marginal log-likelihood
* \param fixed_effects Fixed effects component of location parameter
* \param InitializeModeCovMat If true, posterior mode is initialized to 0 and the covariance matrix is calculated. Otherwise, existing values are used
* \param CalcModePostRandEff_already_done If true, it is assumed that the posterior mode of the random effects has already been calculated
*/
void EvalLAApproxNegLogLikelihood(const double* y_data, const double* cov_pars, double& negll,
	const double* fixed_effects = nullptr, bool InitializeModeCovMat = true, bool CalcModePostRandEff_already_done = false) {
	if (y_data != nullptr) {
		SetY(y_data);
	}
	else {
		if (!CalcModePostRandEff_already_done) {
			CHECK(y_has_been_set_);// without y_data, the response must have been set previously
		}
	}
	if (InitializeModeCovMat) {
		CHECK(cov_pars != nullptr);
	}
	if (CalcModePostRandEff_already_done) {
		negll = neg_log_likelihood_;//Whenever the mode is calculated that likelihood is calculated as well. So we might as well just return the saved neg_log_likelihood_
	}
	else {//not CalcModePostRandEff_already_done
		if (InitializeModeCovMat) {
			//We reset the initial modes to 0. This is done to avoid that different calls to EvalLAApproxNegLogLikelihood lead to (very small) differences.
			for (const auto& cluster_i : unique_clusters_) {
				likelihood_[cluster_i]->InitializeModeAvec();//TODO: maybe omit this step?
			}
			const vec_t cov_pars_vec = Eigen::Map<const vec_t>(cov_pars, num_cov_par_);
			SetCovParsComps(cov_pars_vec);
			if (vecchia_approx_) {
				CalcCovFactor(true, true, 1., false);
			}
			else {
				CalcSigmaComps();
				CalcCovMatrixNonGauss();
			}
		}//end InitializeModeCovMat
		negll = -CalcModePostRandEff(fixed_effects);// mode finding returns the approximate marginal log-likelihood
	}//end not CalcModePostRandEff_already_done
}//end EvalLAApproxNegLogLikelihood
/*!
* \brief Set the data used for making predictions (useful if the same data is used repeatedly, e.g., in validation of GPBoost)
* \param num_data_pred Number of data points for which predictions are made
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e.
first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
*/
void SetPredictionData(int num_data_pred,
	const data_size_t* cluster_ids_data_pred = nullptr,
	const char* re_group_data_pred = nullptr,
	const double* re_group_rand_coef_data_pred = nullptr,
	double* gp_coords_data_pred = nullptr,
	const double* gp_rand_coef_data_pred = nullptr,
	const double* covariate_data_pred = nullptr) {
	CHECK(num_data_pred > 0);
	// For every input, a nullptr clears the corresponding saved member, otherwise the data is copied into it
	if (cluster_ids_data_pred == nullptr) {
		cluster_ids_data_pred_.clear();
	}
	else {
		cluster_ids_data_pred_ = std::vector<data_size_t>(cluster_ids_data_pred, cluster_ids_data_pred + num_data_pred);
	}
	if (re_group_data_pred == nullptr) {
		re_group_levels_pred_.clear();
	}
	else {
		//For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred'
		re_group_levels_pred_ = std::vector<std::vector<re_group_t>>(num_re_group_, std::vector<re_group_t>(num_data_pred));
		ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred_);
	}
	if (re_group_rand_coef_data_pred == nullptr) {
		re_group_rand_coef_data_pred_.clear();
	}
	else {
		re_group_rand_coef_data_pred_ = std::vector<double>(re_group_rand_coef_data_pred, re_group_rand_coef_data_pred + num_data_pred * num_re_group_rand_coef_);
	}
	if (gp_coords_data_pred == nullptr) {
		gp_coords_data_pred_.clear();
	}
	else {
		gp_coords_data_pred_ = std::vector<double>(gp_coords_data_pred, gp_coords_data_pred + num_data_pred * dim_gp_coords_);
	}
	if (gp_rand_coef_data_pred == nullptr) {
		gp_rand_coef_data_pred_.clear();
	}
	else {
		gp_rand_coef_data_pred_ = std::vector<double>(gp_rand_coef_data_pred, gp_rand_coef_data_pred + num_data_pred * num_gp_rand_coef_);
	}
	if (covariate_data_pred == nullptr) {
		covariate_data_pred_.clear();
	}
	else {
		covariate_data_pred_ = std::vector<double>(covariate_data_pred, covariate_data_pred + num_data_pred * num_coef_);
	}
	num_data_pred_ = num_data_pred;
}//end SetPredictionData
/*!
* \brief Make predictions: calculate conditional mean and variances or covariance matrix
* Note: You should pre-allocate memory for out_predict
* Its length is equal to num_data_pred if only the conditional mean is predicted (predict_cov_mat==false && predict_var==false)
* or num_data_pred * (1 + num_data_pred) if the predictive covariance matrix is also calculated (predict_cov_mat==true)
* or num_data_pred * 2 if predictive variances are also calculated (predict_var==true)
* \param cov_pars_pred Covariance parameters of components
* \param y_obs Response variable for observed data
* \param num_data_pred Number of data points for which predictions are made
* \param[out] out_predict Predictive/conditional mean at prediction points followed by the predictive covariance matrix in column-major format (if predict_cov_mat==true) or the predictive variances (if predict_var==true)
* \param calc_cov_factor If true, the covariance matrix of the observed data is factorized otherwise a previously done factorization is used (default=true)
* \param predict_cov_mat If true, the predictive/conditional covariance matrix is calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param predict_var If true, the predictive/conditional variances are calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param predict_response If true, the response variable (label) is predicted, otherwise the latent random effects (this is only relevant for non-Gaussian data) (default=false)
* \param
covariate_data_pred Covariate data (=independent variables, features) for prediction * \param coef_pred Coefficients for linear covariates * \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made * \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0' * \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients * \param gp_coords_data_pred Coordinates (features) for Gaussian process * \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients * \param use_saved_data If true, saved data is used and some arguments are ignored * \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points * \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions (-1 means that the value already set at initialization is used) * \param fixed_effects Fixed effects component of location parameter for observed data (only used for non-Gaussian data) * \param fixed_effects_pred Fixed effects component of location parameter for predicted data (only used for 
non-Gaussian data) */ void Predict(const double* cov_pars_pred, const double* y_obs, data_size_t num_data_pred, double* out_predict, bool calc_cov_factor = true, bool predict_cov_mat = false, bool predict_var = false, bool predict_response = false, const double* covariate_data_pred = nullptr, const double* coef_pred = nullptr, const data_size_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr, const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr, const double* gp_rand_coef_data_pred = nullptr, bool use_saved_data = false, const char* vecchia_pred_type = nullptr, int num_neighbors_pred = -1, const double* fixed_effects = nullptr, const double* fixed_effects_pred = nullptr) { //First check whether previously set data should be used and load it if required std::vector<std::vector<re_group_t>> re_group_levels_pred, re_group_levels_pred_orig;//Matrix with group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j) // Note: re_group_levels_pred_orig is only used for the case (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) // since then re_group_levels_pred is over-written for every cluster and the original data thus needs to be saved if (use_saved_data) { if (num_data_pred > 0) { CHECK(num_data_pred == num_data_pred_); } else { num_data_pred = num_data_pred_; } re_group_levels_pred = re_group_levels_pred_; if (cluster_ids_data_pred_.empty()) { cluster_ids_data_pred = nullptr; } else { cluster_ids_data_pred = cluster_ids_data_pred_.data(); } if (re_group_rand_coef_data_pred_.empty()) { re_group_rand_coef_data_pred = nullptr; } else { re_group_rand_coef_data_pred = re_group_rand_coef_data_pred_.data(); } if (gp_coords_data_pred_.empty()) { gp_coords_data_pred = nullptr; } else { gp_coords_data_pred = gp_coords_data_pred_.data(); } if (gp_rand_coef_data_pred_.empty()) { gp_rand_coef_data_pred = 
nullptr; } else { gp_rand_coef_data_pred = gp_rand_coef_data_pred_.data(); } if (covariate_data_pred_.empty()) { covariate_data_pred = nullptr; } else { covariate_data_pred = covariate_data_pred_.data(); } }// end use_saved_data else { if (re_group_data_pred != nullptr) { //For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred' re_group_levels_pred = std::vector<std::vector<re_group_t>>(num_re_group_, std::vector<re_group_t>(num_data_pred)); ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred); } } if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) { re_group_levels_pred_orig = re_group_levels_pred; } //Some checks CHECK(num_data_pred > 0); //Check whether required data is missing if (re_group_rand_coef_data_pred == nullptr && num_re_group_rand_coef_ > 0) { Log::REFatal("Missing covariate data for random coefficients for grouped random effects for making predictions"); } if (gp_coords_data_pred == nullptr && num_gp_ > 0) { Log::REFatal("Missing coordinate data for Gaussian process for making predictions"); } if (gp_rand_coef_data_pred == nullptr && num_gp_rand_coef_ > 0) { Log::REFatal("Missing covariate data for random coefficients for Gaussian process for making predictions"); } if (cluster_ids_data_pred == nullptr && num_clusters_ > 1) { Log::REFatal("Missing cluster_id data for making predictions"); } if (!gauss_likelihood_ && predict_response && predict_cov_mat) { Log::REFatal("Calculation of the predictive covariance matrix is not supported when predicting the response variable (label) for non-Gaussian data"); } if (predict_cov_mat && predict_var) { Log::REFatal("Calculation of both the predictive covariance matrix and variances is not supported. 
Choose one of these option (predict_cov_mat or predict_var)"); } if (vecchia_approx_ && gauss_likelihood_ && predict_var) { Log::REDebug("Calculation of only predictive variances is currently not optimized for the Vecchia approximation. If you need only variances and this takes too much time or memory, contact the developer or open a GitHub issue."); } if (has_covariates_) { CHECK(covariate_data_pred != nullptr); CHECK(coef_pred != nullptr); } if (y_obs == nullptr) { if (!y_has_been_set_) { Log::REFatal("Response variable data is not provided and has not been set before"); } } if (num_data_pred > 10000 && predict_cov_mat) { double num_mem_d = ((double)num_data_pred) * ((double)num_data_pred); int mem_size = (int)(num_mem_d * 8. / 1000000.); Log::REWarning("The covariance matrix can be very large for large sample sizes which might lead to memory limitations. In your case (n = %d), the covariance needs at least approximately %d mb of memory. If you only need variances or covariances for linear combinations, contact the developer of this package or open a GitHub issue and ask to implement this feature.", num_data_pred, mem_size); } if (vecchia_approx_) { if (vecchia_pred_type != nullptr) { string_t vecchia_pred_type_S = std::string(vecchia_pred_type); if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_S) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) { Log::REFatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_S.c_str()); } vecchia_pred_type_ = vecchia_pred_type_S; } if (num_neighbors_pred > 0) { num_neighbors_pred_ = num_neighbors_pred; } } // Initialize linear predictor related terms and covariance parameters vec_t coef, mu;//mu = linear regression predictor if (has_covariates_) {//calculate linear regression term coef = Eigen::Map<const vec_t>(coef_pred, num_coef_); den_mat_t X_pred = Eigen::Map<const den_mat_t>(covariate_data_pred, num_data_pred, num_coef_); mu = X_pred * coef; } vec_t cov_pars = Eigen::Map<const 
vec_t>(cov_pars_pred, num_cov_par_); //Set up cluster IDs std::map<data_size_t, int> num_data_per_cluster_pred; std::map<data_size_t, std::vector<int>> data_indices_per_cluster_pred; std::vector<data_size_t> unique_clusters_pred; data_size_t num_clusters_pred; SetUpGPIds(num_data_pred, cluster_ids_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, unique_clusters_pred, num_clusters_pred); //Check whether predictions are made for existing clusters or if only for new independet clusters predictions are made bool pred_for_observed_data = false; for (const auto& cluster_i : unique_clusters_pred) { if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) != unique_clusters_.end()) { pred_for_observed_data = true; break; } } //Factorize covariance matrix and calculate Psi^{-1}y_obs or calculate Laplace approximation (if required) const double* fixed_effects_ptr = fixed_effects; vec_t fixed_effects_vec; if (pred_for_observed_data) { // Set response data and fixed effects if (gauss_likelihood_) { if (has_covariates_ || fixed_effects != nullptr) { vec_t resid; if (y_obs != nullptr) { resid = Eigen::Map<const vec_t>(y_obs, num_data_); } else { resid = y_vec_; } if (has_covariates_) { resid -= X_ * coef; } //add external fixed effects to linear predictor if (fixed_effects != nullptr) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_; ++i) { resid[i] -= fixed_effects[i]; } } SetY(resid.data()); }//end if has_covariates_ else {//no covariates if (y_obs != nullptr) { SetY(y_obs); } }//end no covariates }//end if gauss_likelihood_ else {//if not gauss_likelihood_ if (has_covariates_) { fixed_effects_vec = X_ * coef; //add external fixed effects to linear predictor if (fixed_effects != nullptr) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_; ++i) { fixed_effects_vec[i] += fixed_effects[i]; } } fixed_effects_ptr = fixed_effects_vec.data(); } if (y_obs != nullptr) { SetY(y_obs); } }//end if not 
gauss_likelihood_ //TODO (low prio): the factorization needs to be done only for the GP realizations / clusters for which predictions are made (currently it is done for all) SetCovParsComps(cov_pars); if (!(vecchia_approx_ && gauss_likelihood_)) {// no need to call CalcCovFactor here for the Vecchia approximation for Gaussian data, this is done in the prediction steps below if (calc_cov_factor) { if (gauss_likelihood_) { CalcCovFactor(false, true, 1., false);// Create covariance matrix and factorize it } else {//not gauss_likelihood_ //We reset the initial modes to 0. This is done to avoid that different calls to the prediction function lead to (very small) differences // as the mode is calculated from different starting values. // If one is willing to accept these (very) small differences, one could disable this with the advantage of having faster predictions // as the mode does not need to be found anew. for (const auto& cluster_i : unique_clusters_) { likelihood_[cluster_i]->InitializeModeAvec(); } if (vecchia_approx_) { CalcCovFactor(false, true, 1., false); } else { CalcSigmaComps(); CalcCovMatrixNonGauss(); } CalcModePostRandEff(fixed_effects_ptr); }//end not gauss_likelihood_ }//end if calc_cov_factor if (gauss_likelihood_) { CalcYAux();//note: in some cases a call to CalcYAux() could be avoided (e.g. no covariates and not GPBoost algorithm)... 
} }//end not (vecchia_approx_ && gauss_likelihood_) }//end if pred_for_observed_data (factorizatiion of covariance matrix) // Loop over different clusters to calculate predictions for (const auto& cluster_i : unique_clusters_pred) { //Case 1: no data observed for this Gaussian process with ID 'cluster_i' if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) == unique_clusters_.end()) { T_mat psi; std::vector<std::shared_ptr<RECompBase<T_mat>>> re_comps_cluster_i; int num_REs_pred = num_data_per_cluster_pred[cluster_i]; //Calculate covariance matrix if needed if (predict_cov_mat || predict_var || predict_response) { if (vecchia_approx_) { //TODO: move this code out into another function for better readability // Initialize RE components std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<Triplet_t> entries_init_B_cluster_i; std::vector<Triplet_t> entries_init_B_grad_cluster_i; std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); CreateREComponentsVecchia(num_data_pred, data_indices_per_cluster_pred, cluster_i, num_data_per_cluster_pred, gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, cov_fct_taper_range_, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, "none", num_neighbors_pred_);//TODO: maybe also use ordering for making predictions? 
(need to check that there are not errors) for (int j = 0; j < num_comps_total_; ++j) { const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]); re_comps_cluster_i[j]->SetCovPars(pars); } // Calculate a Cholesky factor sp_mat_t B_cluster_i; sp_mat_t D_inv_cluster_i; std::vector<sp_mat_t> B_grad_cluster_i;//not used, but needs to be passed to function std::vector<sp_mat_t> D_grad_cluster_i;//not used, but needs to be passed to function CalcCovFactorVecchia(num_data_per_cluster_pred[cluster_i], false, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, B_cluster_i, D_inv_cluster_i, B_grad_cluster_i, D_grad_cluster_i); //Calculate Psi sp_mat_t D_sqrt(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]); D_sqrt.setIdentity(); D_sqrt.diagonal().array() = D_inv_cluster_i.diagonal().array().pow(-0.5); sp_mat_t B_inv_D_sqrt; eigen_sp_Lower_sp_RHS_cs_solve(B_cluster_i, D_sqrt, B_inv_D_sqrt, true); psi = B_inv_D_sqrt * B_inv_D_sqrt.transpose(); }//end vecchia_approx_ else {//not vecchia_approx_ CreateREComponents(num_data_pred, num_re_group_, data_indices_per_cluster_pred, cluster_i, re_group_levels_pred, num_data_per_cluster_pred, num_re_group_rand_coef_, re_group_rand_coef_data_pred, ind_effect_group_rand_coef_, num_gp_, gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, cov_fct_taper_range_, ind_intercept_gp_, true, re_comps_cluster_i); if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) { num_REs_pred = re_comps_cluster_i[0]->GetNumUniqueREs(); } else { num_REs_pred = num_data_per_cluster_pred[cluster_i]; } psi = T_mat(num_REs_pred, num_REs_pred); if (gauss_likelihood_) { psi.setIdentity();//nugget effect } else { psi.setZero(); } for (int j = 0; j < num_comps_total_; ++j) { const 
vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]); re_comps_cluster_i[j]->SetCovPars(pars); re_comps_cluster_i[j]->CalcSigma(); psi += (*(re_comps_cluster_i[j]->GetZSigmaZt().get())); } }//end not vecchia_approx_ if (gauss_likelihood_) { psi *= cov_pars[0];//back-transform } }//end calculation of covariance matrix // Add external fixed_effects vec_t mean_pred_id = vec_t::Zero(num_data_per_cluster_pred[cluster_i]); if (fixed_effects_pred != nullptr) {//add externaly provided fixed effects #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { mean_pred_id[i] += fixed_effects_pred[data_indices_per_cluster_pred[cluster_i][i]]; } } // Add linear regression predictor if (has_covariates_) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { mean_pred_id[i] += mu[data_indices_per_cluster_pred[cluster_i][i]]; } } bool predict_var_or_response = predict_var || (predict_response && !gauss_likelihood_); vec_t var_pred_id; if (predict_var_or_response) { var_pred_id = psi.diagonal(); } // Map from predictions from random effects scale b to "data scale" Zb if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) { if (predict_var_or_response) { vec_t var_pred_id_on_RE_scale = var_pred_id; var_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { var_pred_id[i] = var_pred_id_on_RE_scale[(re_comps_cluster_i[0]->random_effects_indices_of_data_)[i]]; } } if (predict_cov_mat) { T_mat cov_mat_pred_id_on_RE_scale = psi; sp_mat_t Zpred(num_data_per_cluster_pred[cluster_i], num_REs_pred); std::vector<Triplet_t> triplets(num_data_per_cluster_pred[cluster_i]); #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { triplets[i] = Triplet_t(i, 
(re_comps_cluster_i[0]->random_effects_indices_of_data_)[i], 1.); } Zpred.setFromTriplets(triplets.begin(), triplets.end()); psi = Zpred * cov_mat_pred_id_on_RE_scale * Zpred.transpose(); } }//end only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_ // Transform to response scale for non-Gaussian data if needed if (!gauss_likelihood_ && predict_response) { likelihood_[unique_clusters_[0]]->PredictResponse(mean_pred_id, var_pred_id, predict_var); } // Write on output #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i]; } // Write covariance / variance on output if (!predict_response || gauss_likelihood_) {//this is not done if predict_response==true for non-Gaussian data if (predict_cov_mat) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = psi.coeff(j, i); } } }//end predict_cov_mat if (predict_var) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { out_predict[data_indices_per_cluster_pred[cluster_i][i] + num_data_pred] = var_pred_id[i]; } }//end predict_var }//end !predict_response || gauss_likelihood_ else { // predict_response && !gauss_likelihood_ if (predict_var) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { out_predict[data_indices_per_cluster_pred[cluster_i][i] + num_data_pred] = var_pred_id[i]; } }//end predict_var }//end write covariance / variance on output }//end cluster_i with no observed data else { //Case 2: there exists observed data for this cluster_i den_mat_t gp_coords_mat_pred; 
std::vector<data_size_t> random_effects_indices_of_data_pred; int num_REs_pred = num_data_per_cluster_pred[cluster_i]; if (num_gp_ > 0) { std::vector<double> gp_coords_pred; for (int j = 0; j < dim_gp_coords_; ++j) { for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { gp_coords_pred.push_back(gp_coords_data_pred[j * num_data_pred + id]); } } gp_coords_mat_pred = Eigen::Map<den_mat_t>(gp_coords_pred.data(), num_data_per_cluster_pred[cluster_i], dim_gp_coords_); } if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) { // Determine unique group levels per cluster and create map which maps every data point per cluster to a group level random_effects_indices_of_data_pred = std::vector<data_size_t>(num_data_per_cluster_pred[cluster_i]); std::vector<re_group_t> re_group_levels_pred_unique; std::map<re_group_t, int> map_group_label_index_pred; int num_group_pred = 0; int ii = 0; for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { if (map_group_label_index_pred.find(re_group_levels_pred_orig[0][id]) == map_group_label_index_pred.end()) { map_group_label_index_pred.insert({ re_group_levels_pred_orig[0][id], num_group_pred }); re_group_levels_pred_unique.push_back(re_group_levels_pred_orig[0][id]); random_effects_indices_of_data_pred[ii] = num_group_pred; num_group_pred += 1; } else { random_effects_indices_of_data_pred[ii] = map_group_label_index_pred[re_group_levels_pred_orig[0][id]]; } ii += 1; } re_group_levels_pred[0] = re_group_levels_pred_unique; num_REs_pred = (int)re_group_levels_pred[0].size(); }//end only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ else if (only_one_GP_calculations_on_RE_scale_) { random_effects_indices_of_data_pred = std::vector<data_size_t>(num_data_per_cluster_pred[cluster_i]); std::vector<int> uniques;//unique points std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there 
are duplicates DetermineUniqueDuplicateCoords(gp_coords_mat_pred, num_data_per_cluster_pred[cluster_i], uniques, unique_idx); #pragma omp for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { random_effects_indices_of_data_pred[i] = unique_idx[i]; } den_mat_t gp_coords_mat_pred_unique = gp_coords_mat_pred(uniques, Eigen::all); gp_coords_mat_pred = gp_coords_mat_pred_unique; num_REs_pred = (int)gp_coords_mat_pred.rows(); }//end only_one_GP_calculations_on_RE_scale_ // Initialize predictive mean and covariance vec_t mean_pred_id; if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) { mean_pred_id = vec_t(num_REs_pred); } else { mean_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]); } T_mat cov_mat_pred_id; vec_t var_pred_id; // Calculate predictions // Special case: Vecchia aproximation for Gaussian data if (vecchia_approx_ && gauss_likelihood_) {//TODO: move this code to another function for better readability std::shared_ptr<RECompGP<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][ind_intercept_gp_]); int num_data_tot = num_data_per_cluster_[cluster_i] + num_data_per_cluster_pred[cluster_i]; double num_mem_d = ((double)num_neighbors_pred_) * ((double)num_neighbors_pred_) * (double)(num_data_tot)+(double)(num_neighbors_pred_) * (double)(num_data_tot); int mem_size = (int)(num_mem_d * 8. / 1000000.); if (mem_size > 4000) { Log::REDebug("The current implementation of the Vecchia approximation needs a lot of memory if the number of neighbors is large. In your case (nb. of neighbors = %d, nb. of observations = %d, nb. of predictions = %d), this needs at least approximately %d mb of memory. 
If this is a problem for you, contact the developer of this package or open a GitHub issue and ask to change this.", num_neighbors_pred_, num_data_per_cluster_[cluster_i], num_data_per_cluster_pred[cluster_i], mem_size); } //TODO: implement a more efficient version when only predictive variances are required and not full covariance matrices bool predict_var_or_cov_mat = predict_var || predict_cov_mat; if (vecchia_pred_type_ == "order_obs_first_cond_obs_only") { CalcPredVecchiaObservedFirstOrder(true, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "order_obs_first_cond_all") { CalcPredVecchiaObservedFirstOrder(false, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "order_pred_first") { CalcPredVecchiaPredictedFirstOrder(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "latent_order_obs_first_cond_obs_only") { CalcPredVecchiaLatentObservedFirstOrder(true, cluster_i, num_data_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "latent_order_obs_first_cond_all") { CalcPredVecchiaLatentObservedFirstOrder(false, cluster_i, num_data_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id); } if (predict_var) { var_pred_id = cov_mat_pred_id.diagonal(); if (!predict_cov_mat) { cov_mat_pred_id.resize(0, 0); } } }//end (vecchia_approx_ && gauss_likelihood_) else {// not 
vecchia_approx_ or not gauss_likelihood_ // General case: either non-Gaussian data or Gaussian data without the Vecchia approximation // NOTE: if vecchia_approx_==true and gauss_likelihood_==false, the cross-covariance matrix Sigma_{1,2} = cov(x_pred,x) is not approximated but the exact version is used bool predict_var_or_response = predict_var || (predict_response && !gauss_likelihood_);//variance needs to be available for resposne prediction for non-Gaussian data CalcPred(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_group_levels_pred, re_group_rand_coef_data_pred, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, predict_var_or_response, mean_pred_id, cov_mat_pred_id, var_pred_id); //map from predictions from random effects scale b to "data scale" Zb if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) { vec_t mean_pred_id_on_RE_scale = mean_pred_id; mean_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { mean_pred_id[i] = mean_pred_id_on_RE_scale[random_effects_indices_of_data_pred[i]]; } if (predict_var_or_response) { vec_t var_pred_id_on_RE_scale = var_pred_id; var_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]); #pragma omp parallel for schedule(static) for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { var_pred_id[i] = var_pred_id_on_RE_scale[random_effects_indices_of_data_pred[i]]; } } if (predict_cov_mat) { T_mat cov_mat_pred_id_on_RE_scale = cov_mat_pred_id; sp_mat_t Zpred(num_data_per_cluster_pred[cluster_i], num_REs_pred); std::vector<Triplet_t> triplets(num_data_per_cluster_pred[cluster_i]); #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { triplets[i] = Triplet_t(i, 
random_effects_indices_of_data_pred[i], 1.); } Zpred.setFromTriplets(triplets.begin(), triplets.end()); cov_mat_pred_id = Zpred * cov_mat_pred_id_on_RE_scale * Zpred.transpose(); } }//end only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ }//end not vecchia_approx_ or not gauss_likelihood_ // Add externaly provided fixed effects if (fixed_effects_pred != nullptr) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { mean_pred_id[i] += fixed_effects_pred[data_indices_per_cluster_pred[cluster_i][i]]; } } // Add linear regression predictor if (has_covariates_) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { mean_pred_id[i] += mu[data_indices_per_cluster_pred[cluster_i][i]]; } } if (!gauss_likelihood_ && predict_response) { likelihood_[unique_clusters_[0]]->PredictResponse(mean_pred_id, var_pred_id, predict_var); } // Write on output #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i]; } // Write covariance / variance on output if (predict_cov_mat) { if (gauss_likelihood_) { cov_mat_pred_id *= cov_pars[0]; } #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = cov_mat_pred_id.coeff(j, i); } } }//end predict_cov_mat if (predict_var) { if (gauss_likelihood_) { var_pred_id *= cov_pars[0]; } #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { out_predict[data_indices_per_cluster_pred[cluster_i][i] + 
num_data_pred] = var_pred_id[i]; } }//end predict_var //end write covariance / variance on output }//end cluster_i with data }//end loop over cluster //Set cross-covariances between different independent clusters to 0 if (predict_cov_mat && unique_clusters_pred.size() > 1 && (!predict_response || gauss_likelihood_)) { for (const auto& cluster_i : unique_clusters_pred) { for (const auto& cluster_j : unique_clusters_pred) { if (cluster_i != cluster_j) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index for (int j = 0; j < num_data_per_cluster_pred[cluster_j]; ++j) {//row index out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_j][j] + num_data_pred] = 0.; } } } } } } }//end Predict /*! * \brief Find "reasonable" default values for the intial values of the covariance parameters (on transformed scale) * Note: You should pre-allocate memory for optim_cov_pars (length = number of covariance parameters) * \param y_data Response variable data * \param[out] init_cov_pars Initial values for covariance parameters of RE components */ void FindInitCovPar(const double* y_data, double* init_cov_pars) { double mean = 0; double var = 0; int ind_par; if (gauss_likelihood_) { //determine initial value for nugget effect for (int i = 0; i < num_data_; ++i) {//TODO: run in parallel mean += y_data[i]; } mean /= num_data_; for (int i = 0; i < num_data_; ++i) { var += (y_data[i] - mean) * (y_data[i] - mean); } var /= (num_data_ - 1); init_cov_pars[0] = var; ind_par = 1; }//end Gaussian data else {//non-Gaussian data ind_par = 0; } if (vecchia_approx_) {//Neither distances nor coordinates are saved for random coefficient GPs in the Vecchia approximation -> cannot find initial parameters -> just copy the ones from the intercept GP // find initial values for intercept process int num_par_j = ind_par_[1] - ind_par_[0]; vec_t pars = vec_t(num_par_j); 
re_comps_[unique_clusters_[0]][0]->FindInitCovPar(pars); for (int jj = 0; jj < num_par_j; ++jj) { init_cov_pars[ind_par] = pars[jj]; ind_par++; } //set the same values to random coefficient processes for (int j = 1; j < num_gp_total_; ++j) { num_par_j = ind_par_[j + 1] - ind_par_[j]; for (int jj = 0; jj < num_par_j; ++jj) { init_cov_pars[ind_par] = pars[jj]; ind_par++; } } } else { for (int j = 0; j < num_comps_total_; ++j) { int num_par_j = ind_par_[j + 1] - ind_par_[j]; vec_t pars = vec_t(num_par_j); re_comps_[unique_clusters_[0]][j]->FindInitCovPar(pars); for (int jj = 0; jj < num_par_j; ++jj) { init_cov_pars[ind_par] = pars[jj]; ind_par++; } } } }//end FindInitCovPar int num_cov_par() { return(num_cov_par_); } /*! * \brief Calculate the leaf values when performing a Newton update step after the tree structure has been found in tree-boosting * Note: only used in GPBoost for combined Gaussian process tree-boosting (this is called from 'objective_function_->NewtonUpdateLeafValues'). It is assumed that 'CalcYAux' has been called before (from 'objective_function_->GetGradients'). * \param data_leaf_index Leaf index for every data point (array of size num_data) * \param num_leaves Number of leaves * \param[out] leaf_values Leaf values when performing a Newton update step (array of size num_leaves) * \param marg_variance The marginal variance. Default = 1. Can be used to multiply values by it since Newton updates do not depend on it but 'CalcYAux' might have been called using marg_variance!=1. */ void NewtonUpdateLeafValues(const int* data_leaf_index, const int num_leaves, double* leaf_values, double marg_variance = 1.) 
{ if (!gauss_likelihood_) { Log::REFatal("Newton updates for leaf values is only supported for Gaussian data"); } CHECK(y_aux_has_been_calculated_);//y_aux_ has already been calculated when calculating the gradient for finding the tree structure from 'GetGradients' in 'regression_objetive.hpp' den_mat_t HTPsiInvH(num_leaves, num_leaves); vec_t HTYAux(num_leaves); HTPsiInvH.setZero(); HTYAux.setZero(); for (const auto& cluster_i : unique_clusters_) { //Entries for matrix H_cluster_i = incidence matrix H that relates tree leaves to observations for cluster_i std::vector<Triplet_t> entries_H_cluster_i(num_data_per_cluster_[cluster_i]); #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) { entries_H_cluster_i[i] = Triplet_t(i, data_leaf_index[data_indices_per_cluster_[cluster_i][i]], 1.); } den_mat_t HTPsiInvH_cluster_i; if (vecchia_approx_) { sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);//row major format is needed for Vecchia approx. 
H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end()); HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F) sp_mat_t BH = B_[cluster_i] * H_cluster_i; HTPsiInvH_cluster_i = den_mat_t(BH.transpose() * D_inv_[cluster_i] * BH); } else { sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves); H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end()); HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F) if (only_grouped_REs_use_woodbury_identity_) { T_mat MInvSqrtZtH; if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal sp_mat_t ZtH_cluster_i = Zt_[cluster_i] * H_cluster_i; MInvSqrtZtH = sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().inverse().matrix().asDiagonal() * ZtH_cluster_i; } else { sp_mat_t ZtH_cluster_i; if (chol_fact_has_permutation_) { ZtH_cluster_i = P_Zt_[cluster_i] * H_cluster_i; } else { ZtH_cluster_i = Zt_[cluster_i] * H_cluster_i; } CalcPsiInvSqrtH(ZtH_cluster_i, MInvSqrtZtH, cluster_i, true, false); } HTPsiInvH_cluster_i = H_cluster_i.transpose() * H_cluster_i - MInvSqrtZtH.transpose() * MInvSqrtZtH; } else { T_mat PsiInvSqrtH; CalcPsiInvSqrtH(H_cluster_i, PsiInvSqrtH, cluster_i, true, true); HTPsiInvH_cluster_i = PsiInvSqrtH.transpose() * PsiInvSqrtH; } } HTPsiInvH += HTPsiInvH_cluster_i; } HTYAux *= marg_variance; vec_t new_leaf_values = HTPsiInvH.llt().solve(HTYAux); for (int i = 0; i < num_leaves; ++i) { leaf_values[i] = new_leaf_values[i]; } }//end NewtonUpdateLeafValues private: // RESPONSE DATA /*! \brief Number of data points */ data_size_t num_data_; /*! \brief If true, the response variables have a Gaussian likelihood, otherwise not */ data_size_t gauss_likelihood_ = true; /*! 
\brief Likelihood objects */ std::map<data_size_t, std::unique_ptr<Likelihood<T_mat, T_chol>>> likelihood_; /*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data */ double neg_log_likelihood_; /*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data of previous iteration in optimization used for convergence checking */ double neg_log_likelihood_lag1_; /*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data after linear regression coefficients are update (this equals neg_log_likelihood_lag1_ if there are no regression coefficients). This is used for step-size checking for the covariance parameters */ double neg_log_likelihood_after_lin_coef_update_; /*! \brief Key: labels of independent realizations of REs/GPs, value: data y */ std::map<data_size_t, vec_t> y_; /*! \brief Copy of response data (used only for Gaussian data and if there are also linear covariates since then y_ is modified during the optimization algorithm and this contains the original data) */ vec_t y_vec_; /*! \brief Key: labels of independent realizations of REs/GPs, value: data y of integer type (used only for non-Gaussian likelihood) */ std::map<data_size_t, vec_int_t> y_int_; // Note: the response variable data is saved in y_ / y_int_ (depending on the likelihood type) for Gaussian data with no covariates and for all non-Gaussian data. // For Gaussian data with covariates, the response variables is saved in y_vec_ and y_ is replaced by y - X * beta during the optimization /*! \brief Key: labels of independent realizations of REs/GPs, value: Psi^-1*y_ (used for various computations) */ std::map<data_size_t, vec_t> y_aux_; /*! 
\brief Key: labels of independent realizations of REs/GPs, value: L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (used for various computations when only_grouped_REs_use_woodbury_identity_==true) */ std::map<data_size_t, vec_t> y_tilde_; /*! \brief Key: labels of independent realizations of REs/GPs, value: Z * L ^ -T * L ^ -1 * Z ^ T * y, L = chol(Sigma^-1 + Z^T * Z) (used for various computations when only_grouped_REs_use_woodbury_identity_==true) */ std::map<data_size_t, vec_t> y_tilde2_; /*! \brief Indicates whether y_aux_ has been calculated */ bool y_aux_has_been_calculated_ = false; /*! \brief If true, the response variable data has been set (otherwise y_ is empty) */ bool y_has_been_set_ = false; // GROUPED RANDOM EFFECTS /*! \brief Number of grouped (intercept) random effects */ data_size_t num_re_group_ = 0; /*! \brief Number of grouped random coefficients */ data_size_t num_re_group_rand_coef_ = 0; /*! \brief Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting starts at 1 (and ends at the number of base intercept random effects). Length of vector = num_re_group_rand_coef_. */ std::vector<int> ind_effect_group_rand_coef_; /*! \brief Total number of grouped random effects (random intercepts plus random coefficients (slopes)) */ data_size_t num_re_group_total_ = 0; // GAUSSIAN PROCESS /*! \brief 1 if there is a Gaussian process 0 otherwise */ data_size_t num_gp_ = 0; /*! \brief Type of GP. 0 = classical (spatial) GP, 1 = spatio-temporal GP */ //TODO: remove? int8_t GP_type_ = 0; /*! \brief Number of random coefficient GPs */ data_size_t num_gp_rand_coef_ = 0; /*! \brief Total number of GPs (random intercepts plus random coefficients) */ data_size_t num_gp_total_ = 0; /*! \brief Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs */ int ind_intercept_gp_; /*! 
\brief Dimension of the coordinates (=number of features) for Gaussian process */ int dim_gp_coords_ = 2;//required to save since it is needed in the Predict() function when predictions are made for new independent realizations of GPs /*! \brief Type of covariance(kernel) function for Gaussian processes */ string_t cov_fct_ = "exponential";//required to also save here since it is needed in the Predict() function when predictions are made for new independent realizations of GPs /*! \brief Shape parameter of covariance function (=smoothness parameter for Matern and Wendland covariance. For the Wendland covariance function, we follow the notation of Bevilacqua et al. (2018)). This parameter is irrelevant for some covariance functions such as the exponential or Gaussian. */ double cov_fct_shape_ = 0.; /*! \brief Range parameter of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018) */ double cov_fct_taper_range_ = 1.; // RANDOM EFFECT / GP COMPONENTS /*! \brief Keys: labels of independent realizations of REs/GPs, values: vectors with individual RE/GP components */ std::map<data_size_t, std::vector<std::shared_ptr<RECompBase<T_mat>>>> re_comps_; /*! \brief Indices of parameters of RE components in global parameter vector cov_pars. ind_par_[i] and ind_par_[i+1] -1 are the indices of the first and last parameter of component number i (counting starts at 1) */ std::vector<data_size_t> ind_par_; /*! \brief Number of covariance parameters */ data_size_t num_cov_par_; /*! \brief Total number of random effect components (grouped REs plus other GPs) */ data_size_t num_comps_total_ = 0; // SPECIAL CASES OF RE MODELS FOR FASTER CALCULATIONS /*! \brief If true, the Woodbury, Sherman and Morrison matrix inversion formula is used for calculating the inverse of the covariance matrix (only used if there are only grouped REs and no Gaussian processes) */ bool only_grouped_REs_use_woodbury_identity_ = false; /*! 
\brief True if there is only one grouped random effect component, and (all) calculations are done on the b-scale instead of the Zb-scale (currently used only for non-Gaussian data) */ bool only_one_grouped_RE_calculations_on_RE_scale_ = false; /*! \brief True if there is only one grouped random effect component for Gaussian data, can calculations for predictions (only) are done on the b-scale instead of the Zb-scale */ bool only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ = false; /*! \brief True if there is only one GP random effect component, and calculations are done on the b-scale instead of the Zb-scale (currently used only for non-Gaussian data) */ bool only_one_GP_calculations_on_RE_scale_ = false; // COVARIANCE MATRIX AND CHOLESKY FACTORS OF IT /*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky decomposition solver of covariance matrices Psi (for Gaussian data) */ std::map<data_size_t, T_chol> chol_facts_solve_; /*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky factors of Psi matrices */ //TODO: above needed or can pattern be saved somewhere else? std::map<data_size_t, T_mat> chol_facts_; /*! \brief Key: labels of independent realizations of REs/GPs, values: Square root of diagonal of matrix Sigma^-1 + Zt * Z (used only if there is only one grouped random effect and ZtZ is diagonal) */ std::map<data_size_t, vec_t> sqrt_diag_SigmaI_plus_ZtZ_; /*! \brief Indicates whether the covariance matrix has been factorized or not */ bool covariance_matrix_has_been_factorized_ = false; /*! \brief Key: labels of independent realizations of REs/GPs, values: Idendity matrices used for calculation of inverse covariance matrix */ std::map<data_size_t, T_mat> Id_; ///*! \brief Key: labels of independent realizations of REs/GPs, values: Idendity matrices used for calculation of inverse covariance matrix */ //std::map<data_size_t, cs> Id_cs_;//currently not used /*! 
\brief Key: labels of independent realizations of REs/GPs, values: Permuted idendity matrices used for calculation of inverse covariance matrix when Cholesky factors have a permutation matrix */ std::map<data_size_t, T_mat> P_Id_; /*! \brief Indicates whether a symbolic decomposition for calculating the Cholesky factor of the covariance matrix has been done or not (only for sparse matrices) */ bool chol_fact_pattern_analyzed_ = false; /*! \brief Indicates whether the Cholesky factor has an associated permutation matrix (only for sparse matrices) */ bool chol_fact_has_permutation_ = false; /*! \brief Collects inverse covariance matrices Psi^{-1} (usually not saved, but used e.g. in Fisher scoring without the Vecchia approximation) */ std::map<data_size_t, T_mat> psi_inv_; /*! \brief Inverse covariance matrices Sigma^-1 of random effects. This is only used if only_grouped_REs_use_woodbury_identity_==true (if there are only grouped REs) */ std::map<data_size_t, sp_mat_t> SigmaI_; /*! \brief Pointer to covariance matrix of the random effects (sum of all components). This is only used for non-Gaussian data and if only_grouped_REs_use_woodbury_identity_==false. In the Gaussian case this needs not be saved */ std::map<data_size_t, std::shared_ptr<T_mat>> ZSigmaZt_; // COVARIATE DATA FOR LINEAR REGRESSION TERM /*! \brief If true, the model linearly incluses covariates */ bool has_covariates_ = false; /*! \brief Number of covariates */ int num_coef_; /*! \brief Covariate data */ den_mat_t X_; /*! \brief Auxiliary matrix to store original, un-scaled covariate data (used only in case scaling is applied) */ den_mat_t X_orig_; // OPTIMIZER PROPERTIES /*! \brief List of supported optimizers for covariance parameters */ const std::set<string_t> SUPPORTED_OPTIM_COV_PAR_{ "gradient_descent", "fisher_scoring", "nelder_mead" }; /*! 
\brief List of supported optimizers for regression coefficients */ const std::set<string_t> SUPPORTED_OPTIM_COEF_{ "gradient_descent", "wls", "nelder_mead" }; /*! \brief List of supported convergence criteria used for terminating the optimization algorithm */ const std::set<string_t> SUPPORTED_CONV_CRIT_{ "relative_change_in_parameters", "relative_change_in_log_likelihood" }; /*! \brief Maximal number of steps for which step halving for the learning rate is done */ int MAX_NUMBER_HALVING_STEPS_ = 30; // WOODBURY IDENTITY FOR GROUPED RANDOM EFFECTS ONLY /*! \brief Collects matrices Z^T (only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the indepedent RE components) */ std::map<data_size_t, sp_mat_t> Zt_; /*! \brief Collects matrices Z^TZ (only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the indepedent RE components) */ std::map<data_size_t, sp_mat_t> ZtZ_; /*! \brief Collects vectors Z^Ty (only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */ std::map<data_size_t, vec_t> Zty_; /*! \brief Cumulative number of random effects for components (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the indepedent RE components) */ std::map<data_size_t, std::vector<data_size_t>> cum_num_rand_eff_;//The random effects of component j start at cum_num_rand_eff_[0][j]+1 and end at cum_num_rand_eff_[0][j+1] /*! \brief Sum of squared entries of Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */ std::map<data_size_t, std::vector<double>> Zj_square_sum_; /*! 
\brief Collects matrices Z^T * Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */ std::map<data_size_t, std::vector<sp_mat_t>> ZtZj_; /*! \brief Collects matrices L^-1 * Z^T * Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects and when Fisher scoring is done) */ std::map<data_size_t, std::vector<T_mat>> LInvZtZj_; /*! \brief Permuted matrices Zt_ when Cholesky factors have a permutation matrix */ std::map<data_size_t, sp_mat_t> P_Zt_; /*! \brief Permuted matrices ZtZj_ when Cholesky factors have a permutation matrix */ std::map<data_size_t, std::vector<sp_mat_t>> P_ZtZj_; // VECCHIA APPROXIMATION for GP /*! \brief If true, the Veccia approximation is used for the Gaussian process */ bool vecchia_approx_ = false; /*! \brief If true, a memory optimized version of the Vecchia approximation is used (at the expense of being slightly slower). THiS IS CURRENTLY NOT IMPLEMENTED */ bool vecchia_approx_optim_memory = false; /*! \brief The number of neighbors used in the Vecchia approximation */ int num_neighbors_; /*! \brief Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering */ string_t vecchia_ordering_ = "none"; /*! \brief List of supported options for orderings of the Vecchia approximation */ const std::set<string_t> SUPPORTED_VECCHIA_ORDERING_{ "none", "random" }; /*! \brief The number of neighbors used in the Vecchia approximation for making predictions */ int num_neighbors_pred_; /*! \brief Ordering used in the Vecchia approximation for making predictions. 
"order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions */ string_t vecchia_pred_type_ = "order_obs_first_cond_obs_only";//This is saved here and not simply set in the prediction function since it needs to be used repeatedly in the GPBoost algorithm when making predictions in "regression_metric.hpp" and the way predictions are done for the Vecchia approximation should be decoupled from the boosting algorithm /*! \brief List of supported options for prediction with a Vecchia approximation */ const std::set<string_t> SUPPORTED_VECCHIA_PRED_TYPES_{ "order_obs_first_cond_obs_only", "order_obs_first_cond_all", "order_pred_first", "latent_order_obs_first_cond_obs_only", "latent_order_obs_first_cond_all" }; /*! \brief Collects indices of nearest neighbors (used for Vecchia approximation) */ std::map<data_size_t, std::vector<std::vector<int>>> nearest_neighbors_; /*! \brief Distances between locations and their nearest neighbors (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */ std::map<data_size_t, std::vector<den_mat_t>> dist_obs_neighbors_; /*! \brief Distances between nearest neighbors for all locations (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */ std::map<data_size_t, std::vector<den_mat_t>> dist_between_neighbors_;//TODO: this contains duplicate information (i.e. distances might be saved reduntly several times). But there is a trade-off between storage and computational speed. I currently don't see a way for saving unique distances without copying them when using the^m. /*! \brief Outer product of covariate vector at observations and neighbors with itself. 
First index = cluster, second index = data point i, third index = GP number j (this is used only if the Vecchia approximation is used, this is handled saved directly in the GP component using Z_) */ std::map<data_size_t, std::vector<std::vector<den_mat_t>>> z_outer_z_obs_neighbors_; /*! \brief Collects matrices B = I - A (=Cholesky factor of inverse covariance) for Vecchia approximation */ std::map<data_size_t, sp_mat_t> B_; /*! \brief Collects diagonal matrices D^-1 for Vecchia approximation */ std::map<data_size_t, sp_mat_t> D_inv_; /*! \brief Collects derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation */ std::map<data_size_t, std::vector<sp_mat_t>> B_grad_; /*! \brief Collects derivatives of matrices D for Vecchia approximation */ std::map<data_size_t, std::vector<sp_mat_t>> D_grad_; /*! \brief Triplets for intializing the matrices B */ std::map<data_size_t, std::vector<Triplet_t>> entries_init_B_; /*! \brief Triplets for intializing the matrices B_grad */ std::map<data_size_t, std::vector<Triplet_t>> entries_init_B_grad_; // CLUSTERs of INDEPENDENT REALIZATIONS /*! \brief Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points */ std::map<data_size_t, std::vector<int>> data_indices_per_cluster_; /*! \brief Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization */ std::map<data_size_t, int> num_data_per_cluster_; /*! \brief Number of independent realizations of the REs/GPs */ data_size_t num_clusters_; /*! \brief Unique labels of independent realizations */ std::vector<data_size_t> unique_clusters_; /*! \brief Variance of idiosyncratic error term (nugget effect) (only used in OptimExternal) */ double sigma2_; /*! \brief Quadratic form y^T Psi^-1 y (saved for avoiding double computations when profiling out sigma2 for Gaussian data) */ double yTPsiInvy_; /*! 
\brief Log-determinant of Psi (only used in OptimExternal for avoiding double computations) */
double log_det_Psi_;

// PREDICTION
/*! \brief Cluster IDs for prediction */
std::vector<data_size_t> cluster_ids_data_pred_;
/*! \brief Levels of grouped RE for prediction */
std::vector<std::vector<re_group_t>> re_group_levels_pred_;
/*! \brief Covariate data for grouped random RE for prediction */
std::vector<double> re_group_rand_coef_data_pred_;
/*! \brief Coordinates for GP for prediction */
std::vector<double> gp_coords_data_pred_;
/*! \brief Covariate data for random GP for prediction */
std::vector<double> gp_rand_coef_data_pred_;
/*! \brief Covariate data for linear regression term */
std::vector<double> covariate_data_pred_;
/*! \brief Number of prediction points */
data_size_t num_data_pred_;

/*!
* \brief Nesterov momentum schedule: returns the momentum rate used in iteration 'iter' of the optimization
* \param iter Current iteration number
* \param momentum_schedule_version 0 = constant rate 'nesterov_acc_rate', 1 = rate 1 - 3 / (6 + iter), other values = no momentum
* \param nesterov_acc_rate Constant acceleration rate (used only if momentum_schedule_version == 0)
* \param momentum_offset Number of initial iterations without momentum (returns 0. for iter < momentum_offset)
*/
double NesterovSchedule(int iter, int momentum_schedule_version = 0,
	double nesterov_acc_rate = 0.5, int momentum_offset = 2) {
	if (iter < momentum_offset) {
		return(0.);
	}
	else {
		if (momentum_schedule_version == 0) {
			return(nesterov_acc_rate);
		}
		else if (momentum_schedule_version == 1) {
			return(1. - (3. / (6. + iter)));
		}
		else {
			return(0.);
		}
	}
}

/*! \brief Mutex for thread-safe calls */
std::mutex mutex_;

/*! \brief Constructs identity matrices if sparse matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void ConstructI(data_size_t cluster_i) {
	// Dimension = number of random effects if the Woodbury identity is used, otherwise number of data points in the cluster
	int dim_I = only_grouped_REs_use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
	T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
	I.setIdentity();
	Id_.insert({ cluster_i, I });
	//cs Id_cs = cs();//same for cs type //TODO: construct this independently of Id_ , but then care need to be taken for deleting the pointer objects.
	//Id_cs.nzmax = dim_I;
	//Id_cs.m = dim_I;
	//Id_cs.n = dim_I;
	//Id_[cluster_i].makeCompressed();
	//Id_cs.p = reinterpret_cast<csi*>(Id_[cluster_i].outerIndexPtr());//currently not used
	//Id_cs.i = reinterpret_cast<csi*>(Id_[cluster_i].innerIndexPtr());
	//Id_cs.x = Id_[cluster_i].valuePtr();
	//Id_cs.nz = -1;
	//Id_cs_.insert({ cluster_i, Id_cs });
}

/*! \brief Constructs identity matrices if dense matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void ConstructI(data_size_t cluster_i) {
	// Dimension = number of random effects if the Woodbury identity is used, otherwise number of data points in the cluster
	int dim_I = only_grouped_REs_use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
	T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
	I.setIdentity();
	Id_.insert({ cluster_i, I });
}

/*!
* \brief Set response variable data y_ (and calculate Z^T * y if only_grouped_REs_use_woodbury_identity_ == true)
* \param y_data Response variable data
*/
void SetY(const double* y_data) {
	if (gauss_likelihood_) {
		if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
			// Single cluster and data order unchanged: map the data directly
			y_[unique_clusters_[0]] = Eigen::Map<const vec_t>(y_data, num_data_);//TODO: Is there a more efficient way that avoids copying?
		}
		else {
			// Distribute the data to the individual clusters
			for (const auto& cluster_i : unique_clusters_) {
				y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
				for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
					y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
				}
			}
		}
		if (only_grouped_REs_use_woodbury_identity_) {
			CalcZtY();
		}
	}//end gauss_likelihood_
	else {//not gauss_likelihood_
		(*likelihood_[unique_clusters_[0]]).template CheckY<double>(y_data, num_data_);
		if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
			// Integer-valued response: store in y_int_
			for (const auto& cluster_i : unique_clusters_) {
				y_int_[cluster_i] = vec_int_t(num_data_per_cluster_[cluster_i]);
				for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
					y_int_[cluster_i][j] = static_cast<int>(y_data[data_indices_per_cluster_[cluster_i][j]]);
				}
				(*likelihood_[cluster_i]).template CalculateNormalizingConstant<int>(y_int_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
			}
		}
		else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
			for (const auto& cluster_i : unique_clusters_) {
				y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);
				for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
					y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
				}
				(*likelihood_[cluster_i]).template CalculateNormalizingConstant<double>(y_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
			}
		}
	}//end not gauss_likelihood_
	y_has_been_set_ = true;
}

/*!
* \brief Set response variable data y_ if data is of type float (used for GPBoost algorithm since labels are float)
* \param y_data Response variable data
*/
void SetY(const float* y_data) {
	if (gauss_likelihood_) {
		Log::REFatal("SetY is not implemented for Gaussian data and lables of type float (since it is not needed)");
	}//end gauss_likelihood_
	else {//not gauss_likelihood_
		(*likelihood_[unique_clusters_[0]]).template CheckY<float>(y_data, num_data_);
		if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
			for (const auto& cluster_i : unique_clusters_) {
				y_int_[cluster_i] = vec_int_t(num_data_per_cluster_[cluster_i]);
				for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
					y_int_[cluster_i][j] = static_cast<int>(y_data[data_indices_per_cluster_[cluster_i][j]]);
				}
				(*likelihood_[cluster_i]).template CalculateNormalizingConstant<int>(y_int_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
			}
		}
		else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
			for (const auto& cluster_i : unique_clusters_) {
				y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);
				for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
					y_[cluster_i][j] = static_cast<double>(y_data[data_indices_per_cluster_[cluster_i][j]]);
				}
				(*likelihood_[cluster_i]).template CalculateNormalizingConstant<double>(y_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
			}
		}
	}
	y_has_been_set_ = true;
}
/*!
* \brief Return (last used) response variable data
* \param[out] y Response variable data (memory needs to be preallocated)
*/
void GetY(double* y) {
	if (!y_has_been_set_) {
		Log::REFatal("Respone variable data has not been set");
	}
	if (has_covariates_ && gauss_likelihood_) {
		// Gaussian data with covariates: y_ holds y - X * beta during optimization, so return the original data saved in y_vec_
#pragma omp parallel for schedule(static)
		for (int i = 0; i < num_data_; ++i) {
			y[i] = y_vec_[i];
		}
	}
	else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
		for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
			for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
				y[data_indices_per_cluster_[cluster_i][i]] = y_[cluster_i][i];
			}
		}
	}
	else if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
		// Integer labels are returned as doubles (implicit conversion)
		for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
			for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
				y[data_indices_per_cluster_[cluster_i][i]] = y_int_[cluster_i][i];
			}
		}
	}
}

/*!
* \brief Return covariate data
* \param[out] covariate_data covariate data (memory needs to be preallocated, length num_data_ * num_coef_)
*/
void GetCovariateData(double* covariate_data) {
	if (!has_covariates_) {
		Log::REFatal("Model does not have covariates for a linear predictor");
	}
#pragma omp parallel for schedule(static)
	for (int i = 0; i < num_data_ * num_coef_; ++i) {
		covariate_data[i] = X_.data()[i];
	}
}

/*!
* \brief Calculate Z^T*y (use only when only_grouped_REs_use_woodbury_identity_ == true)
*/
void CalcZtY() {
	for (const auto& cluster_i : unique_clusters_) {
		Zty_[cluster_i] = Zt_[cluster_i] * y_[cluster_i];
	}
}

/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). Array needs to be pre-allocated of length num_data_
*/
void GetYAux(double* y_aux) {
	CHECK(y_aux_has_been_calculated_);
	if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
		// Single cluster and data order unchanged: copy directly
#pragma omp parallel for schedule(static)
		for (int j = 0; j < num_data_; ++j) {
			y_aux[j] = y_aux_[unique_clusters_[0]][j];
		}
	}
	else {
		// Scatter the per-cluster results back to the original data order
		for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
			for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
				y_aux[data_indices_per_cluster_[cluster_i][j]] = y_aux_[cluster_i][j];
			}
		}
	}
}

/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). This vector needs to be pre-allocated of length num_data_
*/
void GetYAux(vec_t& y_aux) {
	CHECK(y_aux_has_been_calculated_);
	if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
		y_aux = y_aux_[unique_clusters_[0]];
	}
	else {
		for (const auto& cluster_i : unique_clusters_) {
			y_aux(data_indices_per_cluster_[cluster_i]) = y_aux_[cluster_i];
		}
	}
}

/*!
* \brief Calculate the gradient of the Laplace-approximated negative log-likelihood with respect to the fixed effects F (only used for non-Gaussian data)
* \param[out] grad_F Gradient of the Laplace-approximated negative log-likelihood with respect to the fixed effects F.
This vector needs to be pre-allocated of length num_data_
* \param fixed_effects Fixed effects component of location parameter
*/
void CalcGradFLaplace(double* grad_F, const double* fixed_effects = nullptr) {
	//std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();// Only for debugging
	const double* fixed_effects_cluster_i_ptr = nullptr;
	vec_t fixed_effects_cluster_i;
	for (const auto& cluster_i : unique_clusters_) {
		vec_t grad_F_cluster_i(num_data_per_cluster_[cluster_i]);
		//map fixed effects to clusters (if needed)
		if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
			fixed_effects_cluster_i_ptr = fixed_effects;
		}
		else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
			fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
#pragma omp parallel for schedule(static)
			for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
				fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
			}
			fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
		}
		// Dispatch to the gradient routine matching the approximation / special case in use
		if (vecchia_approx_) {//vecchia_approx_
			likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxVecchia(y_[cluster_i].data(),
				y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
				B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i],
				false, true, nullptr, grad_F_cluster_i, false);
		}//end vecchia_approx_
		else {//not vecchia_approx_
			if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
				likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxGroupedRE(y_[cluster_i].data(),
					y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
					SigmaI_[cluster_i], Zt_[cluster_i], cum_num_rand_eff_[cluster_i],
					false, true, nullptr, grad_F_cluster_i, false);
			}
			else if (only_one_grouped_RE_calculations_on_RE_scale_) {
				likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
					y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
					re_comps_[cluster_i][0]->cov_pars_[0], re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
					false, true, nullptr, grad_F_cluster_i, false);
			}
			else if (only_one_GP_calculations_on_RE_scale_) {
				likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale(y_[cluster_i].data(),
					y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
					ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
					re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(), re_comps_[cluster_i],
					false, true, nullptr, grad_F_cluster_i, false);
			}
			else {
				likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxStable(y_[cluster_i].data(),
					y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
					ZSigmaZt_[cluster_i], re_comps_[cluster_i],
					false, true, nullptr, grad_F_cluster_i, false);
			}
		}//end not vecchia_approx_
		//write on output
		if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
#pragma omp parallel for schedule(static)//write on output
			for (int j = 0; j < num_data_; ++j) {
				grad_F[j] = grad_F_cluster_i[j];
			}
		}
		else {//more than one cluster and order of samples matters
#pragma omp parallel for schedule(static)
			for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
				grad_F[data_indices_per_cluster_[cluster_i][j]] = grad_F_cluster_i[j];
			}
		} // end more than one cluster
	}//end loop over cluster
	//std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();// Only for debugging
	//double el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count()) / 1000000.;// Only for debugging
	//Log::REInfo("Time for CalcGradFLaplace: %g", el_time);// Only for debugging
}//end CalcGradFLaplace

/*!
* \brief Do Cholesky decomposition and save in chol_facts_ (actual matrix) and chol_facts_solve_ (Eigen solver) if sparse matrices are used
* \param psi Covariance matrix for which the Cholesky decomposition should be done
* \param cluster_i Cluster index for which the Cholesky factor is calculated
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, data_size_t cluster_i) {
	if (!chol_fact_pattern_analyzed_) {
		// The symbolic decomposition (sparsity pattern analysis) is done only once per cluster
		chol_facts_solve_[cluster_i].analyzePattern(psi);
		if (cluster_i == unique_clusters_.back()) {
			chol_fact_pattern_analyzed_ = true;
		}
		if (chol_facts_solve_[cluster_i].permutationP().size() > 0) {//Apply permutation if an ordering is used
			chol_fact_has_permutation_ = true;
			// Pre-compute permuted versions of matrices that are used repeatedly together with the Cholesky factor
			P_Id_[cluster_i] = chol_facts_solve_[cluster_i].permutationP() * Id_[cluster_i];
			if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
				P_Zt_[cluster_i] = chol_facts_solve_[cluster_i].permutationP() * Zt_[cluster_i];
				std::vector<sp_mat_t> P_ZtZj_cluster_i(num_comps_total_);
				for (int j = 0; j < num_comps_total_; ++j) {
					P_ZtZj_cluster_i[j] = chol_facts_solve_[cluster_i].permutationP() * ZtZj_[cluster_i][j];
				}
				P_ZtZj_[cluster_i] = P_ZtZj_cluster_i;
			}
		}
		else {
			chol_fact_has_permutation_ = false;
		}
	}
	chol_facts_solve_[cluster_i].factorize(psi);
	chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
	chol_facts_[cluster_i].makeCompressed();
}
/*!
* \brief Do Cholesky decomposition and save in chol_facts_ (actual matrix) and chol_facts_solve_ (Eigen solver) if dense matrices are used
* \param psi Covariance matrix for which the Cholesky decomposition should be done
* \param cluster_i Cluster index for which the Cholesky factor is calculated
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, data_size_t cluster_i) {
	chol_facts_solve_[cluster_i].compute(psi);
	chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
	chol_fact_has_permutation_ = false;// dense Cholesky factorizations use no fill-reducing permutation
}

/*!
* \brief Apply permutation matrix of Cholesky factor (if it exists)
* \param M[out] Matrix to which the permutation is applied to
* \param cluster_i Cluster index
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void ApplyPermutationCholeskyFactor(T3& M, data_size_t cluster_i) {
	if (chol_facts_solve_[cluster_i].permutationP().size() > 0) {//Apply permutation if an ordering is used
		M = chol_facts_solve_[cluster_i].permutationP() * M;
	}
}
// Dense overload: dense Cholesky factors have no permutation matrix, so this is a no-op
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void ApplyPermutationCholeskyFactor(T3&, data_size_t) {
}
/*!
* \brief Calculate Psi^(-1) if sparse matrices are used
* \param psi_inv[out] Inverse covariance matrix
* \param cluster_i Cluster index for which Psi^(-1) is calculated
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, data_size_t cluster_i) {
	if (only_grouped_REs_use_woodbury_identity_) {
		// Woodbury identity: Psi^-1 = I - Z * (Sigma^-1 + Z^T Z)^-1 * Z^T
		sp_mat_t MInvSqrtZt;
		if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
			MInvSqrtZt = sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().inverse().matrix().asDiagonal() * Zt_[cluster_i];
		}
		else {
			sp_mat_t L_inv;
			if (chol_fact_has_permutation_) {
				eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], P_Id_[cluster_i], L_inv, true);
			}
			else {
				eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Id_[cluster_i], L_inv, true);
			}
			MInvSqrtZt = L_inv * Zt_[cluster_i];
		}
		psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;//this is slow since n can be large (O(n^2*m))
		psi_inv.diagonal().array() += 1.0;
	}
	else {
		////Using CSparse function 'cs_spsolve'
		//cs L_cs = cs();//Prepare LHS
		//L_cs.nzmax = (int)chol_facts_[cluster_i].nonZeros();
		//L_cs.m = num_data_per_cluster_[cluster_i];
		//L_cs.n = num_data_per_cluster_[cluster_i];
		//L_cs.p = reinterpret_cast<csi*>(chol_facts_[cluster_i].outerIndexPtr());
		//L_cs.i = reinterpret_cast<csi*>(chol_facts_[cluster_i].innerIndexPtr());
		//L_cs.x = chol_facts_[cluster_i].valuePtr();
		//L_cs.nz = -1;
		////Invert Cholesky factor
		//sp_mat_t L_inv;
		//sp_Lower_sp_RHS_cs_solve(&L_cs, &Id_cs_[cluster_i], L_inv, true);
		//psi_inv = L_inv.transpose() * L_inv;
		// Alternative version that avoids the use of CSparse function 'cs_spsolve' on OS's (e.g. Linux) on which this can cause problems
		sp_mat_t L_inv;
		if (chol_fact_has_permutation_) {
			eigen_sp_Lower_sp_RHS_solve(chol_facts_[cluster_i], P_Id_[cluster_i], L_inv, true);
		}
		else {
			eigen_sp_Lower_sp_RHS_solve(chol_facts_[cluster_i], Id_[cluster_i], L_inv, true);
		}
		psi_inv = L_inv.transpose() * L_inv;//Note: this is the computational bottleneck for large data when psi=ZSigmaZt and its Cholesky factor is sparse e.g. when having a Wendland covariance function
		////Version 2: doing sparse solving "by hand" but ignoring sparse RHS
		//const double* val = chol_facts_[cluster_i].valuePtr();
		//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
		//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
		//den_mat_t L_inv_dens = den_mat_t(Id_[cluster_i]);
		//for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
		//	sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], L_inv_dens.data() + j * num_data_per_cluster_[cluster_i]);
		//}
		//const sp_mat_t L_inv = L_inv_dens.sparseView();
		//psi_inv = L_inv.transpose() * L_inv;
		////Version 3: let Eigen do the solving
		//psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
	}
}// end CalcPsiInv for sparse matrices

/*!
* \brief Calculate Psi^(-1) if dense matrices are used
* \param psi_inv[out] Inverse covariance matrix
* \param cluster_i Cluster index for which Psi^(-1) is calculated
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, data_size_t cluster_i) {
	if (only_grouped_REs_use_woodbury_identity_) {//typically currently not called as only_grouped_REs_use_woodbury_identity_ is only true for grouped REs only i.e. sparse matrices
		T3 MInvSqrtZt;
		if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
			MInvSqrtZt = sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().inverse().matrix().asDiagonal() * Zt_[cluster_i];
		}
		else {
			MInvSqrtZt = Zt_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
			for (int j = 0; j < (int)MInvSqrtZt.cols(); ++j) {
				L_solve(chol_facts_[cluster_i].data(), (int)chol_facts_[cluster_i].cols(), MInvSqrtZt.data() + j * (int)MInvSqrtZt.cols());
			}
		}
		psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;
		psi_inv.diagonal().array() += 1.0;
	}
	else {
		//Version 2: solving by hand
		T3 L_inv = Id_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
		for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
			L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], L_inv.data() + j * num_data_per_cluster_[cluster_i]);
		}
		//chol_facts_[cluster_i].triangularView<Eigen::Lower>().solveInPlace(L_inv); //slower
		psi_inv = L_inv.transpose() * L_inv;
		////Version 2
		//psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
		// Using dpotri from LAPACK does not work since LAPACK is not installed
		//int info = 0;
		//int n = num_data_per_cluster_[cluster_i];
		//int lda = num_data_per_cluster_[cluster_i];
		//char* uplo = "L";
		//den_mat_t M = chol_facts_[cluster_i];
		//BLASFUNC(dpotri)(uplo, &n, M.data(), &lda, &info);
	}
}// end CalcPsiInv for dense matrices

/*!
* \brief Calculate Psi^(-0.5)H if sparse matrices are used.
Used in 'NewtonUpdateLeafValues' and if only_grouped_REs_use_woodbury_identity_ == true
* \param H Right-hand side matrix H
* \param PsiInvSqrtH[out] Psi^(-0.5)H = solve(chol(Psi),H)
* \param cluster_i Cluster index for which Psi^(-0.5)H is calculated
* \param lower true if chol_facts_[cluster_i] is a lower triangular matrix
* \param permute_H If true, a permutation is applied on H (overwritten) in case the Cholesky factor has a permutation matrix
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(sp_mat_t& H, T3& PsiInvSqrtH, data_size_t cluster_i, bool lower, bool permute_H) {
	if (permute_H) {
		if (chol_fact_has_permutation_) {
			// Bring H into the fill-reducing ordering of the Cholesky factor (note: H is overwritten)
			H = chol_facts_solve_[cluster_i].permutationP() * H;
		}
	}
	// Single triangular solve with sparse RHS: PsiInvSqrtH = L^-1 * H (or L^-T * H if !lower)
	eigen_sp_Lower_sp_RHS_solve(chol_facts_[cluster_i], H, PsiInvSqrtH, lower);
	//TODO: use eigen_sp_Lower_sp_RHS_cs_solve -> faster? (currently this crashes due to Eigen bug, see the definition of sp_Lower_sp_RHS_cs_solve for more details)
}

/*!
* \brief Calculate Psi^(-0.5)H if dense matrices are used.
Used in 'NewtonUpdateLeafValues' and if only_grouped_REs_use_woodbury_identity_ == true
* \param H Right-hand side matrix H
* \param PsiInvSqrtH[out] Psi^(-0.5)H = solve(chol(Psi),H)
* \param cluster_i Cluster index for which Psi^(-0.5)H is calculated
* \param lower true if chol_facts_[cluster_i] is a lower triangular matrix
* \param permute_H Not used
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(sp_mat_t& H, T3& PsiInvSqrtH, data_size_t cluster_i, bool lower, bool) {
	// Densify H, then solve column by column in place with the (dense) Cholesky factor
	PsiInvSqrtH = den_mat_t(H);
#pragma omp parallel for schedule(static)
	for (int j = 0; j < H.cols(); ++j) {
		if (lower) {
			L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
		}
		else {
			L_t_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
		}
	}
}

///*!
//* \brief Caclulate X^TPsi^(-1)X
//* \param X Covariate data matrix X
//* \param[out] XT_psi_inv_X X^TPsi^(-1)X
//*/
//	template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
//	void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
//		den_mat_t BX;
//		if (num_clusters_ == 1) {
//			data_size_t cluster0 = unique_clusters_[0];
//			if (vecchia_approx_) {
//				BX = B_[cluster0] * X;
//				XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
//			}
//			else {
//				BX = X;
//#pragma omp parallel for schedule(static)
//				for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
//					L_solve(chol_facts_[cluster0].data(), num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
//				}
//				XT_psi_inv_X = BX.transpose() * BX;
//			}
//		}
//		else {
//			XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
//			XT_psi_inv_X.setZero();
//			for (const auto& cluster_i : unique_clusters_) {
//				if (vecchia_approx_) {
//					BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
//					XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
//				}
//				else {
//					BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
//#pragma omp parallel for schedule(static)
//					for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
//						L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
//					}
//					XT_psi_inv_X += (BX.transpose() * BX);
//				}
//			}
//		}
//	}
//	//same for sparse matrices
//	template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
//	void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
//		den_mat_t BX;
//		if (num_clusters_ == 1) {
//			data_size_t cluster0 = unique_clusters_[0];
//			if (vecchia_approx_) {
//				BX = B_[cluster0] * X;
//				XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
//			}
//			else {
//				BX = X;
//#pragma omp parallel for schedule(static)
//				for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
//					sp_L_solve(chol_facts_[cluster0].valuePtr(), chol_facts_[cluster0].innerIndexPtr(), chol_facts_[cluster0].outerIndexPtr(),
//						num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
//				}
//				XT_psi_inv_X = BX.transpose() * BX;
//			}
//		}
//		else {
//			XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
//			XT_psi_inv_X.setZero();
//			for (const auto& cluster_i : unique_clusters_) {
//				if (vecchia_approx_) {
//					BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
//					XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
//				}
//				else {
//					BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
//#pragma omp parallel for schedule(static)
//					for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
//						sp_L_solve(chol_facts_[cluster_i].valuePtr(), chol_facts_[cluster_i].innerIndexPtr(), chol_facts_[cluster_i].outerIndexPtr(),
//							num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
//					}
//					XT_psi_inv_X += (BX.transpose() * BX);
//				}
//			}
//		}
//	}

/*!
* \brief Calculate X^TPsi^(-1)X
* \param X Covariate data matrix X
* \param[out] XT_psi_inv_X X^TPsi^(-1)X
*/
void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
	if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent GP realization
		if (vecchia_approx_) {
			// Vecchia: Psi^-1 = B^T * D^-1 * B
			den_mat_t BX = B_[unique_clusters_[0]] * X;
			XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX;
		}
		else {
			if (only_grouped_REs_use_woodbury_identity_) {
				den_mat_t ZtX = Zt_[unique_clusters_[0]] * X;
				if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
					den_mat_t MInvSqrtZtX = sqrt_diag_SigmaI_plus_ZtZ_[unique_clusters_[0]].array().inverse().matrix().asDiagonal() * ZtX;
					XT_psi_inv_X = X.transpose() * X - MInvSqrtZtX.transpose() * MInvSqrtZtX;
				}
				else {
					//TODO: use only one forward solve (sp_L_solve for sparse and sp_L_solve for dense matrices) instead of using Eigens solver which does two solves. But this requires a template function since the Cholesky factor is T_mat
					XT_psi_inv_X = X.transpose() * X - ZtX.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(ZtX);
				}
			}
			else {
				XT_psi_inv_X = X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X);
			}
		}
	}//end only one cluster / independent GP realization
	else {//more than one cluster and order of samples matters
		XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
		XT_psi_inv_X.setZero();
		den_mat_t BX;
		// Accumulate the contribution of every independent cluster
		for (const auto& cluster_i : unique_clusters_) {
			if (vecchia_approx_) {
				BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
				XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
			}
			else {
				if (only_grouped_REs_use_woodbury_identity_) {
					den_mat_t ZtX = Zt_[cluster_i] * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all);
					if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
						den_mat_t MInvSqrtZtX = sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().inverse().matrix().asDiagonal() * ZtX;
						XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) - MInvSqrtZtX.transpose() * MInvSqrtZtX;
					}
					else {
						XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) - ZtX.transpose() * chol_facts_solve_[cluster_i].solve(ZtX);
					}
				}
				else {
					XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all));
				}
			}
		}
	}//end more than one cluster
}

/*!
* \brief Initialize data structures for handling independent realizations of the Gaussian processes * \param num_data Number of data points * \param cluster_ids_data IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) * \param[out] num_data_per_cluster Keys: labels of independent clusters, values: number of data points per independent realization * \param[out] data_indices_per_cluster Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster * \param[out] unique_clusters Unique labels of independent realizations * \param[out] num_clusters Number of independent clusters */ void SetUpGPIds(data_size_t num_data, const data_size_t* cluster_ids_data, std::map<data_size_t, int>& num_data_per_cluster, std::map<data_size_t, std::vector<int>>& data_indices_per_cluster, std::vector<data_size_t>& unique_clusters, data_size_t& num_clusters) { if (cluster_ids_data != nullptr) { for (int i = 0; i < num_data; ++i) { if (num_data_per_cluster.find(cluster_ids_data[i]) == num_data_per_cluster.end()) {//first occurrence of cluster_ids_data[i] unique_clusters.push_back(cluster_ids_data[i]); num_data_per_cluster.insert({ cluster_ids_data[i], 1 }); std::vector<int> id; id.push_back(i); data_indices_per_cluster.insert({ cluster_ids_data[i], id }); } else { num_data_per_cluster[cluster_ids_data[i]] += 1; data_indices_per_cluster[cluster_ids_data[i]].push_back(i); } } num_clusters = (data_size_t)unique_clusters.size(); } else { unique_clusters.push_back(0); num_data_per_cluster.insert({ 0, num_data }); num_clusters = 1; std::vector<int> gp_id_vec(num_data); for (int i = 0; i < num_data; ++i) { gp_id_vec[i] = i; } data_indices_per_cluster.insert({ 0, gp_id_vec }); } } /*! 
* \brief Convert characters in 'const char* re_group_data' to matrix (num_re_group x num_data) with strings of group labels * \param num_data Number of data points * \param num_re_group Number of grouped random effects * \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0' * \param[out] Matrix of dimension num_re_group x num_data with strings of group labels for levels of grouped random effects */ void ConvertCharToStringGroupLevels(data_size_t num_data, data_size_t num_re_group, const char* re_group_data, std::vector<std::vector<re_group_t>>& re_group_levels) { int char_start = 0; for (int ire = 0; ire < num_re_group; ++ire) { for (int id = 0; id < num_data; ++id) { int number_chars = 0; while (re_group_data[char_start + number_chars] != '\0') { number_chars++; } re_group_levels[ire][id] = std::string(re_group_data + char_start); char_start += number_chars + 1; } } } /*! 
* \brief Initialize likelihoods
* \param likelihood Likelihood name
*/
void InitializeLikelihoods(const string_t& likelihood) {
	// One Likelihood object per independent cluster. The constructor arguments after the
	// likelihood name appear to be (number of data points, dimension of the vector on which
	// mode-finding calculations are done, flag) -- the dimension differs by special case
	// (data scale vs. random-effects scale); TODO confirm against the Likelihood constructor.
	for (const auto& cluster_i : unique_clusters_) {
		if (vecchia_approx_) {
			likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_mat, T_chol>>(new Likelihood<T_mat, T_chol>(likelihood,
				num_data_per_cluster_[cluster_i],
				num_data_per_cluster_[cluster_i],
				false));
		}
		else {
			if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
				// dimension = total number of random effects of this cluster
				likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_mat, T_chol>>(new Likelihood<T_mat, T_chol>(likelihood,
					num_data_per_cluster_[cluster_i],
					cum_num_rand_eff_[cluster_i][num_comps_total_],
					false));
			}
			else if (only_one_grouped_RE_calculations_on_RE_scale_) {
				// dimension = number of unique random effects of the single grouped RE
				likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_mat, T_chol>>(new Likelihood<T_mat, T_chol>(likelihood,
					num_data_per_cluster_[cluster_i],
					re_comps_[cluster_i][0]->GetNumUniqueREs(),
					false));
			}
			else if (only_one_GP_calculations_on_RE_scale_) {
				likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_mat, T_chol>>(new Likelihood<T_mat, T_chol>(likelihood,
					num_data_per_cluster_[cluster_i],
					re_comps_[cluster_i][0]->GetNumUniqueREs(),
					true));
			}
			else {
				likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_mat, T_chol>>(new Likelihood<T_mat, T_chol>(likelihood,
					num_data_per_cluster_[cluster_i],
					num_data_per_cluster_[cluster_i],
					true));
			}
		}
		if (!gauss_likelihood_) {
			// Non-Gaussian data: allocate / initialize the mode vector for the Laplace approximation
			likelihood_[cluster_i]->InitializeModeAvec();
		}
	}
}

/*!
* \brief Function that determines
*		(i) the indices (in ind_par_) of the covariance parameters of every random effect component in the vector of all covariance parameter
*		(ii) the total number of covariance parameters
*/
void DetermineCovarianceParameterIndicesNumCovPars() {
	// Determine ind_par_ and num_cov_par_
	ind_par_ = std::vector<data_size_t>();
	//First re_comp has either index 0 or 1 (the latter if there is a nugget effect for Gaussian data)
	if (gauss_likelihood_) {
		num_cov_par_ = 1;//the nugget / error variance is the first parameter
		ind_par_.push_back(1);
	}
	else {
		num_cov_par_ = 0;
		ind_par_.push_back(0);
	}
	//Add indices of parameters of individual components in joint parameter vector
	for (int j = 0; j < (int)re_comps_[unique_clusters_[0]].size(); ++j) {
		ind_par_.push_back(ind_par_.back() + re_comps_[unique_clusters_[0]][j]->NumCovPar());//end points of parameter indices of components
		num_cov_par_ += re_comps_[unique_clusters_[0]][j]->NumCovPar();
	}
}

/*!
* \brief Function that determines whether to use special options for estimation and prediction for certain special cases of random effects models
*/
void DetermineSpecialCasesModelsEstimationPrediction() {
	chol_fact_pattern_analyzed_ = false;
	// Decide whether to use the Woodbury identity (i.e. do matrix inversion on the b scale and not the Zb scale) for grouped random effects models only
	if (num_re_group_ > 0 && num_gp_total_ == 0) {
		only_grouped_REs_use_woodbury_identity_ = true;//Faster to use Woodbury identity since the dimension of the random effects is typically much smaller than the number of data points
		//Note: the use of the Woodbury identity is currently only implemented for grouped random effects (which is also the only use of it).
		// If this should be applied to GPs in the future, adaptions need to be made e.g. in the calculations of the gradient (see y_tilde2_)
	}
	else {
		only_grouped_REs_use_woodbury_identity_ = false;
	}
	// Following are options that depend on the type of likelihood used
	//Define options for faster calculations for special cases of RE models
	only_one_GP_calculations_on_RE_scale_ = num_gp_total_ == 1 && num_comps_total_ == 1 && !gauss_likelihood_ && !vecchia_approx_;//If there is only one GP, we do calculations on the b-scale instead of Zb-scale (currently only for non-Gaussian data)
	only_one_grouped_RE_calculations_on_RE_scale_ = num_re_group_total_ == 1 && num_comps_total_ == 1 && !gauss_likelihood_;//If there is only one grouped RE, we do (all) calculations on the b-scale instead of the Zb-scale (currently only for non-Gaussian data)
	only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ = num_re_group_total_ == 1 && num_comps_total_ == 1 && gauss_likelihood_;//If there is only one grouped RE, we do calculations for prediction on the b-scale instead of the Zb-scale (only effective for Gaussian data)
}

/*!
* \brief Initialize required matrices used when only_grouped_REs_use_woodbury_identity_==true
*/
void InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity() {
	CHECK(num_comps_total_ == num_re_group_total_);
	Zt_ = std::map<data_size_t, sp_mat_t>();
	ZtZ_ = std::map<data_size_t, sp_mat_t>();
	cum_num_rand_eff_ = std::map<data_size_t, std::vector<data_size_t>>();
	Zj_square_sum_ = std::map<data_size_t, std::vector<double>>();
	ZtZj_ = std::map<data_size_t, std::vector<sp_mat_t>>();
	for (const auto& cluster_i : unique_clusters_) {
		// cum_num_rand_eff_cluster_i[j] = cumulative number of random effects of components 0..j-1
		std::vector<data_size_t> cum_num_rand_eff_cluster_i(num_comps_total_ + 1);
		cum_num_rand_eff_cluster_i[0] = 0;
		//Determine number of rows and non-zero entries of Z
		int non_zeros = 0;
		int ncols = 0;
		for (int j = 0; j < num_comps_total_; ++j) {
			sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
			ncols += (int)Z_j->cols();
			non_zeros += (int)Z_j->nonZeros();
			cum_num_rand_eff_cluster_i[j + 1] = ncols;
		}
		//Create matrix Z and calculate sum(Z_j^2) = trace(Z_j^T * Z_j)
		// Z is the horizontal concatenation [Z_0, Z_1, ..., Z_{m-1}] of all component incidence matrices
		std::vector<Triplet_t> triplets;
		triplets.reserve(non_zeros);
		std::vector<double> Zj_square_sum_cluster_i(num_comps_total_);
		int ncol_prev = 0;
		for (int j = 0; j < num_comps_total_; ++j) {
			sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
			for (int k = 0; k < Z_j->outerSize(); ++k) {
				for (sp_mat_t::InnerIterator it(*Z_j, k); it; ++it) {
					triplets.emplace_back(it.row(), ncol_prev + it.col(), it.value());//column offset = columns of previous components
				}
			}
			ncol_prev += (int)Z_j->cols();
			Zj_square_sum_cluster_i[j] = Z_j->squaredNorm();
		}
		sp_mat_t Z_cluster_i(num_data_per_cluster_[cluster_i], ncols);
		Z_cluster_i.setFromTriplets(triplets.begin(), triplets.end());
		sp_mat_t Zt_cluster_i = Z_cluster_i.transpose();
		sp_mat_t ZtZ_cluster_i = Zt_cluster_i * Z_cluster_i;
		//Calculate Z^T * Z_j
		std::vector<sp_mat_t> ZtZj_cluster_i(num_comps_total_);
		for (int j = 0; j < num_comps_total_; ++j) {
			sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
			ZtZj_cluster_i[j] = Zt_cluster_i * (*Z_j);
		}
		//Save all quantities
		Zt_.insert({ cluster_i, Zt_cluster_i });
		ZtZ_.insert({ cluster_i, ZtZ_cluster_i });
		cum_num_rand_eff_.insert({ cluster_i, cum_num_rand_eff_cluster_i });
		Zj_square_sum_.insert({ cluster_i, Zj_square_sum_cluster_i });
		ZtZj_.insert({ cluster_i, ZtZj_cluster_i });
	}
}

/*!
* \brief Initialize identity matrices required for Gaussian data
*/
void InitializeIdentityMatricesForGaussianData() {
	if (gauss_likelihood_) {
		for (const auto& cluster_i : unique_clusters_) {
			ConstructI<T_mat>(cluster_i);//Identity matrices needed for computing inverses of covariance matrices used in gradient descent for Gaussian data
		}
	}
}

/*!
* \brief Function that checks the compatibility of the chosen special options for estimation and prediction for certain special cases of random effects models
*/
void CheckCompatibilitySpecialOptions() {
	//Some checks
	if (only_one_GP_calculations_on_RE_scale_ && only_grouped_REs_use_woodbury_identity_) {
		Log::REFatal("Cannot set both 'only_one_GP_calculations_on_RE_scale_' and 'only_grouped_REs_use_woodbury_identity_' to 'true'");
	}
	if (only_one_GP_calculations_on_RE_scale_ && only_one_grouped_RE_calculations_on_RE_scale_) {
		Log::REFatal("Cannot set both 'only_one_GP_calculations_on_RE_scale_' and 'only_one_grouped_RE_calculations_on_RE_scale_' to 'true'");
	}
	if (vecchia_approx_) {//vecchia_approx_
		if (num_re_group_total_ > 0) {
			Log::REFatal("Vecchia approximation can currently not be used when there are grouped random effects");
		}
	}
	if (only_one_GP_calculations_on_RE_scale_) {//only_one_GP_calculations_on_RE_scale_
		if (gauss_likelihood_) {
			Log::REFatal("Option 'only_one_GP_calculations_on_RE_scale_' is currently not implemented for Gaussian data");
		}
		if (vecchia_approx_) {
			Log::REFatal("Option 'only_one_GP_calculations_on_RE_scale_' is currently not implemented for Vecchia approximation data");
		}
		CHECK(num_gp_total_ == 1);
		CHECK(num_comps_total_ == 1);
		CHECK(num_re_group_total_ == 0);
	}
	if (only_one_grouped_RE_calculations_on_RE_scale_) {//only_one_grouped_RE_calculations_on_RE_scale_
		if (gauss_likelihood_) {
			Log::REFatal("Option 'only_one_grouped_RE_calculations_on_RE_scale_' is currently not implemented for Gaussian data");
		}
		CHECK(!vecchia_approx_);
		CHECK(num_gp_total_ == 0);
		CHECK(num_comps_total_ == 1);
		CHECK(num_re_group_total_ == 1);
	}
	if (only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {//only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
		CHECK(!vecchia_approx_);
		CHECK(num_gp_total_ == 0);
		CHECK(num_comps_total_ == 1);
		CHECK(num_re_group_total_ == 1);
		if (!gauss_likelihood_) {
			Log::REFatal("Option 'only_one_grouped_RE_calculations_on_RE_scale_for_prediction_' is currently only effective for Gaussian data");
		}
	}
	if (only_grouped_REs_use_woodbury_identity_) {//only_grouped_REs_use_woodbury_identity_
		if (gauss_likelihood_ && only_one_grouped_RE_calculations_on_RE_scale_) {
			Log::REFatal("Cannot enable 'only_one_grouped_RE_calculations_on_RE_scale_' if 'only_grouped_REs_use_woodbury_identity_' is enabled for Gaussian data");
		}
		CHECK(num_gp_total_ == 0);
		CHECK(num_comps_total_ == num_re_group_total_);
	}
}

/*!
* \brief Initialize individual component models and collect them in a container
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param re_group_levels Group levels for every grouped random effect
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
* \param num_gp Number of Gaussian processes (intercept only, random coefficients not counting)
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param cov_fct_taper_range Range parameter of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018)
* \param ind_intercept_gp Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs
* \param calculateZZt If true, the matrix Z*Z^T is calculated for grouped random effects and saved (usually not needed if Woodbury identity is used)
* \param[out] re_comps_cluster_i Container that collects the individual component models
*/
void CreateREComponents(data_size_t num_data, data_size_t num_re_group,
	std::map<data_size_t, std::vector<int>>& data_indices_per_cluster,
	data_size_t cluster_i,
	std::vector<std::vector<re_group_t>>& re_group_levels,
	std::map<data_size_t, int>& num_data_per_cluster,
	data_size_t num_re_group_rand_coef,
	const double* re_group_rand_coef_data,
	std::vector<int>& ind_effect_group_rand_coef,
	data_size_t num_gp,
	const double* gp_coords_data,
	int dim_gp_coords,
	const double* gp_rand_coef_data,
	data_size_t num_gp_rand_coef,
	const string_t cov_fct,
	double cov_fct_shape,
	double cov_fct_taper_range,
	int ind_intercept_gp,
	bool calculateZZt,
	std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i) {
	//Grouped REs
	if (num_re_group > 0) {
		for (int j = 0; j < num_re_group; ++j) {
			// Collect the group levels of this component for the data points of this cluster
			std::vector<re_group_t> group_data;
			for (const auto& id : data_indices_per_cluster[cluster_i]) {
				group_data.push_back(re_group_levels[j][id]);//group_data_.push_back(std::string(re_group_data[j * num_data_ + id]));
			}
			re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T_mat>>(new RECompGroup<T_mat>(
				group_data, calculateZZt, !only_one_grouped_RE_calculations_on_RE_scale_)));
		}
		//Random slopes
		if (num_re_group_rand_coef > 0) {
			for (int j = 0; j < num_re_group_rand_coef; ++j) {
				// Covariate values for this random coefficient (column-major layout in re_group_rand_coef_data)
				std::vector<double> rand_coef_data;
				for (const auto& id : data_indices_per_cluster[cluster_i]) {
					rand_coef_data.push_back(re_group_rand_coef_data[j * num_data + id]);
				}
				std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_cluster_i[ind_effect_group_rand_coef[j] - 1]);//Subtract -1 since ind_effect_group_rand_coef[j] starts counting at 1 not 0
				// The random-slope component reuses the grouping structure of its base intercept component
				re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T_mat>>(new RECompGroup<T_mat>(
					re_comp->random_effects_indices_of_data_.data(),
					re_comp->num_data_,
					re_comp->map_group_label_index_,
					re_comp->num_group_,
					rand_coef_data,
					calculateZZt)));
			}
		}
	}
	//GPs
	if (num_gp > 0) {
		// Gather the coordinates of this cluster in column-major order for the Eigen::Map below
		std::vector<double> gp_coords;
		for (int j = 0; j < dim_gp_coords; ++j) {
			for (const auto& id : data_indices_per_cluster[cluster_i]) {
				gp_coords.push_back(gp_coords_data[j * num_data + id]);
			}
		}
		den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
		re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(
			gp_coords_mat, cov_fct, cov_fct_shape, cov_fct_taper_range, true, only_one_GP_calculations_on_RE_scale_)));
		//Random slopes
		if (num_gp_rand_coef > 0) {
			for (int j = 0; j < num_gp_rand_coef; ++j) {
				std::vector<double> rand_coef_data;
				for (const auto& id : data_indices_per_cluster[cluster_i]) {
					rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
				}
				// The random-coefficient GP reuses the distance matrix / incidence structure of the intercept GP
				std::shared_ptr<RECompGP<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_cluster_i[ind_intercept_gp]);
				re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(
					re_comp->dist_,
					re_comp->has_Z_,
					&re_comp->Z_,
					rand_coef_data,
					cov_fct,
					cov_fct_shape,
					cov_fct_taper_range,
					re_comp->GetTaperMu())));
			}
		}
	}
}

/*!
* \brief Initialize individual component models and collect them in a container when the Vecchia approximation is used
* \param num_data Number of data points
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param cov_fct_taper_range Range parameter of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018)
* \param[out] re_comps_cluster_i Container that collects the individual component models
* \param[out] nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param[out] dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param[out] dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param[out] entries_init_B_cluster_i Triplets for initializing the matrices B
* \param[out] entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param[out] z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param num_neighbors The number of neighbors used in the Vecchia approximation
*/
void CreateREComponentsVecchia(data_size_t num_data,
	std::map<data_size_t, std::vector<int>>& data_indices_per_cluster,
	data_size_t cluster_i,
	std::map<data_size_t, int>& num_data_per_cluster,
	const double* gp_coords_data,
	int dim_gp_coords,
	const double* gp_rand_coef_data,
	data_size_t num_gp_rand_coef,
	const string_t cov_fct,
	double cov_fct_shape,
	double cov_fct_taper_range,
	std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i,
	std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_obs_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
	std::vector<Triplet_t >& entries_init_B_cluster_i,
	std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
	std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
	string_t vecchia_ordering = "none",
	int num_neighbors = 30) {
	int ind_intercept_gp = (int)re_comps_cluster_i.size();
	if (vecchia_ordering == "random") {
		// Fixed seed so the random ordering is reproducible across calls
		unsigned seed = 0;
		std::shuffle(data_indices_per_cluster[cluster_i].begin(), data_indices_per_cluster[cluster_i].end(), std::default_random_engine(seed));
	}
	// Gather coordinates of this cluster in column-major order for the Eigen::Map below
	std::vector<double> gp_coords;
	for (int j = 0; j < dim_gp_coords; ++j) {
		for (const auto& id : data_indices_per_cluster[cluster_i]) {
			gp_coords.push_back(gp_coords_data[j * num_data + id]);
		}
	}
	den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
	re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(
		gp_coords_mat, cov_fct, cov_fct_shape, cov_fct_taper_range, false, false)));
	find_nearest_neighbors_Veccia_fast(gp_coords_mat, num_data_per_cluster[cluster_i], num_neighbors,
		nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
	for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
		for (int j = 0; j < (int)nearest_neighbors_cluster_i[i].size(); ++j) {
			entries_init_B_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
			entries_init_B_grad_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
		}
		entries_init_B_cluster_i.push_back(Triplet_t(i, i, 1.));//Put 1's on the diagonal since B = I - A
	}
	//Random coefficients
	if (num_gp_rand_coef > 0) {
		std::shared_ptr<RECompGP<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_cluster_i[ind_intercept_gp]);
		for (int j = 0; j < num_gp_rand_coef; ++j) {
			std::vector<double> rand_coef_data;
			for (const auto& id : data_indices_per_cluster[cluster_i]) {
				rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
			}
			re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(
				rand_coef_data, cov_fct, cov_fct_shape, cov_fct_taper_range, re_comp->GetTaperMu())));
			//save random coefficient data in the form of outer product matrices
			// NOTE(review): bare '#pragma omp for' without an enclosing 'parallel' region is a no-op
			// outside parallel regions -- presumably '#pragma omp parallel for' was intended; verify
#pragma omp for schedule(static)
			for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
				if (j == 0) {
					z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef);
				}
				// Covariate vector at point i and its neighbors (point 0 has no neighbors)
				int dim_z = (i == 0) ? 1 : ((int)nearest_neighbors_cluster_i[i].size() + 1);
				vec_t coef_vec(dim_z);
				coef_vec(0) = rand_coef_data[i];
				if (i > 0) {
					for (int ii = 1; ii < dim_z; ++ii) {
						coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
					}
				}
				z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
			}
		}
	}
}

/*!
* \brief Set the covariance parameters of the components
* \param cov_pars Covariance parameters
*/
void SetCovParsComps(const vec_t& cov_pars) {
	CHECK(cov_pars.size() == num_cov_par_);
	for (const auto& cluster_i : unique_clusters_) {
		for (int j = 0; j < num_comps_total_; ++j) {
			// ind_par_[j]..ind_par_[j+1] delimits the parameters of component j in the joint vector
			const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
			re_comps_[cluster_i][j]->SetCovPars(pars);
		}
	}
}

/*!
* \brief Transform the covariance parameters to the scale on which the MLE is found
* \param cov_pars Covariance parameters
* \param[out] cov_pars_trans Transformed covariance parameters
*/
void TransformCovPars(const vec_t& cov_pars, vec_t& cov_pars_trans) {
	CHECK(cov_pars.size() == num_cov_par_);
	cov_pars_trans = vec_t(num_cov_par_);
	if (gauss_likelihood_) {
		cov_pars_trans[0] = cov_pars[0];//nugget / error variance is not transformed
	}
	for (int j = 0; j < num_comps_total_; ++j) {
		const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
		vec_t pars_trans = pars;
		if (gauss_likelihood_) {
			re_comps_[unique_clusters_[0]][j]->TransformCovPars(cov_pars[0], pars, pars_trans);
		}
		else {
			re_comps_[unique_clusters_[0]][j]->TransformCovPars(1., pars, pars_trans);
		}
		cov_pars_trans.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]) = pars_trans;
	}
}

/*!
* \brief Back-transform the covariance parameters to the original scale
* \param cov_pars Covariance parameters (transformed scale)
* \param[out] cov_pars_orig Back-transformed, original covariance parameters
*/
void TransformBackCovPars(const vec_t& cov_pars, vec_t& cov_pars_orig) {
	CHECK(cov_pars.size() == num_cov_par_);
	cov_pars_orig = vec_t(num_cov_par_);
	if (gauss_likelihood_) {
		// First parameter is the nugget / error variance; it is not transformed here
		cov_pars_orig[0] = cov_pars[0];
	}
	for (int j = 0; j < num_comps_total_; ++j) {
		const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
		vec_t pars_orig = pars;
		if (gauss_likelihood_) {
			re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(cov_pars[0], pars, pars_orig);
		}
		else {
			// No nugget effect for non-Gaussian likelihoods -> use 1 as reference variance
			re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(1, pars, pars_orig);
		}
		cov_pars_orig.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]) = pars_orig;
	}
}
/*!
* \brief Calculate covariance matrices of the components (for every cluster and component)
*/
void CalcSigmaComps() {
	for (const auto& cluster_i : unique_clusters_) {
		for (int j = 0; j < num_comps_total_; ++j) {
			re_comps_[cluster_i][j]->CalcSigma();
		}
	}
}
/*!
* \brief Construct inverse covariance matrix Sigma^-1 if there are only grouped random effects (this is then a diagonal matrix)
* \param[out] SigmaI Inverse covariance matrix of random effects (a diagonal matrix)
* \param cluster_i Cluster index for which SigmaI is constructed
*/
void CalcSigmaIGroupedREsOnly(sp_mat_t& SigmaI, data_size_t cluster_i) {
	CHECK(!only_one_grouped_RE_calculations_on_RE_scale_);
	std::vector<Triplet_t> triplets;
	triplets.reserve(cum_num_rand_eff_[cluster_i][num_comps_total_]);
	for (int j = 0; j < num_comps_total_; ++j) {
		// Diagonal entries for component j are 1 / variance of that component
		double sigmaI = re_comps_[cluster_i][j]->cov_pars_[0];
		sigmaI = 1.0 / sigmaI;
		for (int i = cum_num_rand_eff_[cluster_i][j]; i < cum_num_rand_eff_[cluster_i][j + 1]; ++i) {
			triplets.emplace_back(i, i, sigmaI);
		}
	}
	SigmaI = sp_mat_t(cum_num_rand_eff_[cluster_i][num_comps_total_], cum_num_rand_eff_[cluster_i][num_comps_total_]);
	SigmaI.setFromTriplets(triplets.begin(), triplets.end());
}
/*!
* \brief Update covariance parameters, apply step size safeguard, factorize covariance matrix, and calculate new value of objective function
* \param[out] cov_pars Covariance parameters
* \param nat_grad Gradient for gradient descent or = FI^-1 * gradient for Fisher scoring (="natural" gradient)
* \param[out] lr_cov Learning rate (can be written on in case it gets decreased)
* \param profile_out_marginal_variance If true, the first parameter (marginal variance, nugget effect) is ignored
* \param use_nesterov_acc If true, Nesterov acceleration is used
* \param it Iteration number
* \param optimizer_cov Optimizer used
* \param[out] cov_pars_after_grad_aux Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param[out] cov_pars_after_grad_aux_lag1 Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param acc_rate_cov Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used.
Default = 0
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param fixed_effects Fixed effects component of location parameter
*/
void UpdateCovPars(vec_t& cov_pars, const vec_t& nat_grad, double& lr_cov, bool profile_out_marginal_variance,
	bool use_nesterov_acc, int it, const string_t& optimizer_cov, vec_t& cov_pars_after_grad_aux,
	vec_t& cov_pars_after_grad_aux_lag1, double acc_rate_cov, int nesterov_schedule_version,
	int momentum_offset, const double* fixed_effects = nullptr) {
	vec_t cov_pars_new(num_cov_par_);
	if (profile_out_marginal_variance) {
		// Marginal variance is kept fixed (profiled out) and not updated
		cov_pars_new[0] = cov_pars[0];
	}
	double lr = lr_cov;
	bool decrease_found = false;
	bool halving_done = false;
	for (int ih = 0; ih < MAX_NUMBER_HALVING_STEPS_; ++ih) {
		if (profile_out_marginal_variance) {
			cov_pars_new.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - lr * nat_grad.array()).exp().matrix();//make update on log-scale
		}
		else {
			cov_pars_new = (cov_pars.array().log() - lr * nat_grad.array()).exp().matrix();//make update on log-scale
		}
		// Apply Nesterov acceleration
		if (use_nesterov_acc) {
			cov_pars_after_grad_aux = cov_pars_new;
			ApplyMomentumStep(it, cov_pars_after_grad_aux, cov_pars_after_grad_aux_lag1, cov_pars_new, acc_rate_cov,
				nesterov_schedule_version, profile_out_marginal_variance, momentum_offset, true);
			// Note: (i) cov_pars_after_grad_aux and cov_pars_after_grad_aux_lag1 correspond to the parameters obtained after calculating the gradient before applying acceleration
			// (ii) cov_pars (below this) are the parameters obtained after applying acceleration (and cov_pars_lag1 is simply the value of the previous iteration)
			// We first apply a gradient step and then an acceleration step (and not the other way around) since this is computationally more efficient
			// (otherwise the covariance matrix needs to be factored twice: once for the gradient step (accelerated parameters) and once for calculating the
			// log-likelihood (non-accelerated parameters after gradient update) when checking for convergence at the end of an iteration.
			// However, performing the acceleration before or after the gradient update gives equivalent algorithms
		}
		CalcCovFactorOrModeAndNegLL(cov_pars_new, fixed_effects);
		// Safeguard against too large steps by halving the learning rate when the objective increases
		if (neg_log_likelihood_ <= neg_log_likelihood_after_lin_coef_update_) {
			decrease_found = true;
			break;
		}
		else {
			halving_done = true;
			lr *= 0.5;
			acc_rate_cov *= 0.5;
			if (!gauss_likelihood_) {
				// Reset mode to previous value since also parameters are discarded
				for (const auto& cluster_i : unique_clusters_) {
					likelihood_[cluster_i]->ResetModeToPreviousValue();
				}
			}
		}
	}
	if (halving_done) {
		if (optimizer_cov == "fisher_scoring") {
			Log::REDebug("GPModel covariance parameter estimation: No decrease in the objective function in iteration number %d. The learning rate has been decreased in this iteration.", it + 1);
		}
		else if (optimizer_cov == "gradient_descent") {
			lr_cov = lr; //permanently decrease learning rate (for Fisher scoring, this is not done. I.e., step halving is done newly in every iteration of Fisher scoring)
			Log::REDebug("GPModel covariance parameter estimation: The learning rate has been decreased permanently since with the previous learning rate, there was no decrease in the objective function in iteration number %d. New learning rate = %g", it + 1, lr_cov);
		}
	}
	if (!decrease_found) {
		Log::REDebug("GPModel covariance parameter estimation: No decrease in the objective function in iteration number %d after the maximal number of halving steps (%d).", it + 1, MAX_NUMBER_HALVING_STEPS_);
	}
	if (use_nesterov_acc) {
		cov_pars_after_grad_aux_lag1 = cov_pars_after_grad_aux;
	}
	cov_pars = cov_pars_new;
}//end UpdateCovPars
/*!
* \brief Update linear regression coefficients and apply step size safeguard
* \param[out] beta Linear regression coefficients
* \param grad Gradient
* \param[out] lr_coef Learning rate (can be written on in case it gets decreased)
* \param cov_pars Covariance parameters (used for evaluating the negative log-likelihood after the coefficient update)
* \param use_nesterov_acc If true, Nesterov acceleration is used
* \param it Iteration number
* \param[out] beta_after_grad_aux Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param[out] beta_after_grad_aux_lag1 Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param acc_rate_coef Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param fixed_effects External fixed effects
* \param[out] fixed_effects_vec Fixed effects component of location parameter as sum of linear predictor and potentially additional external fixed effects
*/
void UpdateLinCoef(vec_t& beta, const vec_t& grad, double& lr_coef, const vec_t& cov_pars,
	bool use_nesterov_acc, int it, vec_t& beta_after_grad_aux, vec_t& beta_after_grad_aux_lag1,
	double acc_rate_coef, int nesterov_schedule_version, int momentum_offset,
	const double* fixed_effects, vec_t& fixed_effects_vec) {
	vec_t beta_new;
	double lr = lr_coef;
	bool decrease_found = false;
	bool halving_done = false;
	for (int ih = 0; ih < MAX_NUMBER_HALVING_STEPS_; ++ih) {
		beta_new = beta - lr * grad;
		// Apply Nesterov acceleration
		if (use_nesterov_acc) {
			beta_after_grad_aux = beta_new;
			ApplyMomentumStep(it, beta_after_grad_aux, beta_after_grad_aux_lag1, beta_new, acc_rate_coef,
				nesterov_schedule_version, false, momentum_offset, false);
			//Note: use same version of Nesterov acceleration as for covariance parameters (see 'UpdateCovPars')
		}
		UpdateFixedEffects(beta_new, fixed_effects, fixed_effects_vec);
		if (gauss_likelihood_) {
			EvalNegLogLikelihoodOnlyUpdateFixedEffects(cov_pars.data(), neg_log_likelihood_after_lin_coef_update_);
		}//end if gauss_likelihood_
		else {//non-Gaussian data
			neg_log_likelihood_after_lin_coef_update_ = -CalcModePostRandEff(fixed_effects_vec.data());//calculate mode and approximate marginal likelihood
		}
		// Safeguard against too large steps by halving the learning rate when the objective increases
		if (neg_log_likelihood_after_lin_coef_update_ <= neg_log_likelihood_lag1_) {
			decrease_found = true;
			break;
		}
		else {
			// Safeguard against too large steps by halving the learning rate
			halving_done = true;
			lr *= 0.5;
			acc_rate_coef *= 0.5;
			if (!gauss_likelihood_) {
				// Reset mode to previous value since also parameters are discarded
				for (const auto& cluster_i : unique_clusters_) {
					likelihood_[cluster_i]->ResetModeToPreviousValue();
				}
			}
		}
	}
	if (halving_done) {
		lr_coef = lr; //permanently decrease learning rate (for Fisher scoring, this is not done. I.e., step halving is done newly in every iteration of Fisher scoring)
		Log::REDebug("GPModel linear regression coefficient estimation: The learning rate has been decreased permanently since with the previous learning rate, there was no decrease in the objective function in iteration number %d. New learning rate = %g", it + 1, lr_coef);
	}
	if (!decrease_found) {
		Log::REDebug("GPModel linear regression coefficient estimation: No decrease in the objective function in iteration number %d after the maximal number of halving steps (%d).", it + 1, MAX_NUMBER_HALVING_STEPS_);
	}
	if (use_nesterov_acc) {
		beta_after_grad_aux_lag1 = beta_after_grad_aux;
	}
	beta = beta_new;
}//end UpdateLinCoef
/*!
* \brief Calculate the covariance matrix ZSigmaZt of the random effects (sum of all components)
* \param[out] ZSigmaZt Covariance matrix ZSigmaZt
* \param cluster_i Cluster index for which the covariance matrix is calculated
*/
void CalcZSigmaZt(T_mat& ZSigmaZt, data_size_t cluster_i) {
	ZSigmaZt = T_mat(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
	if (gauss_likelihood_) {
		// Start from the identity (nugget effect on the diagonal)
		ZSigmaZt.setIdentity();
	}
	else {
		// No nugget effect for non-Gaussian likelihoods
		ZSigmaZt.setZero();
	}
	for (int j = 0; j < num_comps_total_; ++j) {
		ZSigmaZt += (*(re_comps_[cluster_i][j]->GetZSigmaZt()));
	}
}//end CalcZSigmaZt
/*!
* \brief Calculate the covariance matrix ZSigmaZt if only_grouped_REs_use_woodbury_identity_==false or the inverse covariance matrix Sigma^-1 if there are only grouped REs i.e. if only_grouped_REs_use_woodbury_identity_==true.
* This function is only used for non-Gaussian data as in the Gaussian case this needs not be saved
*/
void CalcCovMatrixNonGauss() {
	if (!only_one_grouped_RE_calculations_on_RE_scale_) {//Nothing to calculate if only_one_grouped_RE_calculations_on_RE_scale_
		if (only_grouped_REs_use_woodbury_identity_) {
			for (const auto& cluster_i : unique_clusters_) {
				CalcSigmaIGroupedREsOnly(SigmaI_[cluster_i], cluster_i);
			}
		}
		else {
			for (const auto& cluster_i : unique_clusters_) {
				if (num_comps_total_ == 1) {//no need to sum up different components
					ZSigmaZt_[cluster_i] = re_comps_[cluster_i][0]->GetZSigmaZt();
				}
				else {
					T_mat ZSigmaZt;
					CalcZSigmaZt(ZSigmaZt, cluster_i);
					ZSigmaZt_[cluster_i] = std::make_shared<T_mat>(ZSigmaZt);
				}
			}
		}
	}
}//end CalcCovMatrixNonGauss
/*!
* \brief Calculate the mode of the posterior of the latent random effects for use in the Laplace approximation.
This function is only used for non-Gaussian data
* \param fixed_effects Fixed effects component of location parameter
* \return Approximate marginal log-likelihood evaluated at the mode
*/
double CalcModePostRandEff(const double* fixed_effects = nullptr) {
	double mll = 0.;
	double mll_cluster_i;
	const double* fixed_effects_cluster_i_ptr = nullptr;
	vec_t fixed_effects_cluster_i;
	for (const auto& cluster_i : unique_clusters_) {
		if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
			fixed_effects_cluster_i_ptr = fixed_effects;
		}
		else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
			fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
			//TODO: this is quite inefficient as the mapping of the fixed_effects to the different clusters is done repeatedly for the same data. Could be saved if performance is an issue here.
#pragma omp parallel for schedule(static)
			for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
				fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
			}
			fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
		}
		// Dispatch to the mode-finding routine matching the model structure
		if (vecchia_approx_) {
			likelihood_[cluster_i]->FindModePostRandEffCalcMLLVecchia(y_[cluster_i].data(), y_int_[cluster_i].data(),
				fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
				B_[cluster_i], D_inv_[cluster_i], mll_cluster_i);
		}
		else {
			if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
				likelihood_[cluster_i]->FindModePostRandEffCalcMLLGroupedRE(y_[cluster_i].data(), y_int_[cluster_i].data(),
					fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
					SigmaI_[cluster_i], Zt_[cluster_i], mll_cluster_i);
			}
			else if (only_one_grouped_RE_calculations_on_RE_scale_) {
				likelihood_[cluster_i]->FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(), y_int_[cluster_i].data(),
					fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
					re_comps_[cluster_i][0]->cov_pars_[0],
					re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(), mll_cluster_i);
			}
			else if (only_one_GP_calculations_on_RE_scale_) {
				likelihood_[cluster_i]->FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_[cluster_i].data(), y_int_[cluster_i].data(),
					fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
					ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
					re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(), mll_cluster_i);
				//Note: ZSigmaZt_[cluster_i] contains Sigma=Cov(b) and not Z*Sigma*Zt since has_Z_==false for this random effects component
			}
			else {
				likelihood_[cluster_i]->FindModePostRandEffCalcMLLStable(y_[cluster_i].data(), y_int_[cluster_i].data(),
					fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i],
					ZSigmaZt_[cluster_i], mll_cluster_i);
			}
		}
		// Total marginal log-likelihood = sum over independent clusters
		mll += mll_cluster_i;
	}
	return(mll);
}//CalcModePostRandEff
/*!
* \brief Calculate matrices A and D_inv as well as their derivatives for the Vecchia approximation for one cluster (independent realization of GP)
* \param num_data_cluster_i Number of data points
* \param calc_gradient If true, the gradient also be calculated (only for Vecchia approximation)
* \param re_comps_cluster_i Container that collects the individual component models
* \param nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param entries_init_B_cluster_i Triplets for initializing the matrices B
* \param entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param[out] B_cluster_i Matrix A = I - B (= Cholesky factor of inverse covariance) for Vecchia approximation
* \param[out] D_inv_cluster_i Diagonal matrices D^-1 for Vecchia approximation
* \param[out] B_grad_cluster_i Derivatives of matrices A ( = derivative of matrix -B) for Vecchia approximation
* \param[out] D_grad_cluster_i Derivatives of matrices D for Vecchia approximation
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale.
Default = true
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance
*/
void CalcCovFactorVecchia(int num_data_cluster_i, bool calc_gradient,//TODO: make arguments const
	std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i,
	std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_obs_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
	std::vector<Triplet_t >& entries_init_B_cluster_i,
	std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
	std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
	sp_mat_t& B_cluster_i,
	sp_mat_t& D_inv_cluster_i,
	std::vector<sp_mat_t>& B_grad_cluster_i,
	std::vector<sp_mat_t>& D_grad_cluster_i,
	bool transf_scale = true,
	double nugget_var = 1.,
	bool calc_gradient_nugget = false) {
	int num_par_comp = re_comps_cluster_i[ind_intercept_gp_]->num_cov_par_;
	// Total number of gradient matrices (+1 if the nugget gradient is also required)
	int num_par_gp = num_par_comp * num_gp_total_ + calc_gradient_nugget;
	//Initialize matrices B = I - A and D^-1 as well as their derivatives (in order that the code below can be run in parallel)
	B_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//B = I - A
	B_cluster_i.setFromTriplets(entries_init_B_cluster_i.begin(), entries_init_B_cluster_i.end());//Note: 1's are put on the diagonal
	D_inv_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//D^-1. Note: we first calculate D, and then take the inverse below
	D_inv_cluster_i.setIdentity();//Put 1's on the diagonal for nugget effect (entries are not overridden but added below)
	if (!transf_scale) {
		D_inv_cluster_i.diagonal().array() *= nugget_var;//nugget effect is not 1 if not on transformed scale
	}
	if (!gauss_likelihood_) {
		// No nugget effect for non-Gaussian likelihoods -> zero out the diagonal initialization
		D_inv_cluster_i.diagonal().array() *= 0.;
	}
	if (calc_gradient) {
		B_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of B = derivative of (-A)
		D_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of D
		for (int ipar = 0; ipar < num_par_gp; ++ipar) {
			B_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
			B_grad_cluster_i[ipar].setFromTriplets(entries_init_B_grad_cluster_i.begin(), entries_init_B_grad_cluster_i.end());
			D_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
			D_grad_cluster_i[ipar].setIdentity();//Put 0 on the diagonal
			D_grad_cluster_i[ipar].diagonal().array() = 0.;//TODO: maybe change initialization of this matrix by also using triplets -> faster?
		}
	}//end initialization
#pragma omp parallel for schedule(static)
	for (int i = 0; i < num_data_cluster_i; ++i) {
		int num_nn = (int)nearest_neighbors_cluster_i[i].size();
		//calculate covariance matrices between observations and neighbors and among neighbors as well as their derivatives
		den_mat_t cov_mat_obs_neighbors(1, num_nn);
		den_mat_t cov_mat_between_neighbors(num_nn, num_nn);
		std::vector<den_mat_t> cov_grad_mats_obs_neighbors(num_par_gp);//covariance matrix plus derivative wrt to every parameter
		std::vector<den_mat_t> cov_grad_mats_between_neighbors(num_par_gp);
		if (i > 0) {
			for (int j = 0; j < num_gp_total_; ++j) {
				int ind_first_par = j * num_par_comp;//index of first parameter (variance) of component j in gradient vectors
				if (j == 0) {
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1], calc_gradient, transf_scale, nugget_var);//write on matrices directly for first GP component
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1], calc_gradient, transf_scale, nugget_var);
				}
				else {//random coefficient GPs
					den_mat_t cov_mat_obs_neighbors_j;
					den_mat_t cov_mat_between_neighbors_j;
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1], calc_gradient, transf_scale, nugget_var);
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1], calc_gradient, transf_scale, nugget_var);
					//multiply by coefficient matrix
					cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();//cov_mat_obs_neighbors_j.cwiseProduct()
					cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
					cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
					cov_mat_between_neighbors += cov_mat_between_neighbors_j;
					if (calc_gradient) {
						cov_grad_mats_obs_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
						cov_grad_mats_obs_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
						cov_grad_mats_between_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
						cov_grad_mats_between_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
					}
				}
			}//end loop over components j
		}//end if (i > 0)
		//Calculate matrices B and D as well as their derivatives
		//1. add first summand of matrix D (ZCZ^T_{ii}) and its derivatives
		for (int j = 0; j < num_gp_total_; ++j) {
			double d_comp_j = re_comps_cluster_i[ind_intercept_gp_ + j]->cov_pars_[0];
			if (!transf_scale) {
				d_comp_j *= nugget_var;
			}
			if (j > 0) {//random coefficient
				d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
			}
			D_inv_cluster_i.coeffRef(i, i) += d_comp_j;
			if (calc_gradient) {
				if (transf_scale) {
					D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = d_comp_j;//derivative of the covariance function wrt the variance. derivative of the covariance function wrt to range is zero on the diagonal
				}
				else {
					if (j == 0) {
						D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = 1.;//1's on the diagonal on the original scale
					}
					else {
						D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
					}
				}
			}
		}
		if (calc_gradient && calc_gradient_nugget) {
			D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) = 1.;
		}
		//2. remaining terms
		if (i > 0) {
			if (gauss_likelihood_) {
				if (transf_scale) {
					cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
				}
				else {
					cov_mat_between_neighbors.diagonal().array() += nugget_var;
				}
			}
			//else {//Seems unnecessary
			//	cov_mat_between_neighbors.diagonal().array() += 1e-10;//Avoid numerical problems when there is no nugget effect
			//}
			den_mat_t A_i(1, num_nn);
			den_mat_t cov_mat_between_neighbors_inv;
			den_mat_t A_i_grad_sigma2;
			if (calc_gradient) {
				// Note: it is faster (approx. 1.5-2 times) to first calculate cov_mat_between_neighbors_inv and the multiply this with the matrices below
				// instead of always using the Cholesky factor of cov_mat_between_neighbors to calculate cov_mat_between_neighbors_inv * (a matrix)
				den_mat_t I(num_nn, num_nn);
				I.setIdentity();
				cov_mat_between_neighbors_inv = cov_mat_between_neighbors.llt().solve(I);
				A_i = cov_mat_obs_neighbors * cov_mat_between_neighbors_inv;
				if (calc_gradient_nugget) {
					A_i_grad_sigma2 = -A_i * cov_mat_between_neighbors_inv;
				}
			}
			else {
				A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
			}
			for (int inn = 0; inn < num_nn; ++inn) {
				B_cluster_i.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i(0, inn);
			}
			D_inv_cluster_i.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
			if (calc_gradient) {
				den_mat_t A_i_grad(1, num_nn);
				for (int j = 0; j < num_gp_total_; ++j) {
					int ind_first_par = j * num_par_comp;
					for (int ipar = 0; ipar < num_par_comp; ++ipar) {
						A_i_grad = (cov_grad_mats_obs_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv) -
							(cov_mat_obs_neighbors * cov_mat_between_neighbors_inv * cov_grad_mats_between_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv);
						for (int inn = 0; inn < num_nn; ++inn) {
							B_grad_cluster_i[ind_first_par + ipar].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad(0, inn);
						}
						if (ipar == 0) {
							D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) -= ((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
								(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//add to derivative of diagonal elements for marginal variance
						}
						else {
							D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) = -((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
								(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//don't add to existing values since derivative of diagonal is zero for range
						}
					}
				}
				if (calc_gradient_nugget) {
					for (int inn = 0; inn < num_nn; ++inn) {
						B_grad_cluster_i[num_par_gp - 1].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad_sigma2(0, inn);
					}
					D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) -= (A_i_grad_sigma2 * cov_mat_obs_neighbors.transpose())(0, 0);
				}
			}//end calc_gradient
		}//end if i > 0
		// Invert the diagonal entry: D_inv holds D^-1 on exit
		D_inv_cluster_i.coeffRef(i, i) = 1. / D_inv_cluster_i.coeffRef(i, i);
	}//end loop over data i
}//end CalcCovFactorVecchia
/*!
* \brief Create the covariance matrix Psi and factorize it (either calculate a Cholesky factor or the inverse covariance matrix)
*		Use only for Gaussian data
* \param calc_gradient If true, the gradient also be calculated (only for Vecchia approximation)
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale.
Default = true (only for Vecchia approximation)
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if vecchia_approx_==true and transf_scale ==false to transform back, normally this is equal to one, since the variance parameter is modelled separately and factored out)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance (only for Vecchia approximation)
*/
void CalcCovFactor(bool calc_gradient = false, bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
	if (vecchia_approx_) {
		for (const auto& cluster_i : unique_clusters_) {
			int num_data_cl_i = num_data_per_cluster_[cluster_i];
			CalcCovFactorVecchia(num_data_cl_i, calc_gradient, re_comps_[cluster_i], nearest_neighbors_[cluster_i],
				dist_obs_neighbors_[cluster_i], dist_between_neighbors_[cluster_i],
				entries_init_B_[cluster_i], entries_init_B_grad_[cluster_i],
				z_outer_z_obs_neighbors_[cluster_i],
				B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i],
				transf_scale, nugget_var, calc_gradient_nugget);
		}
	}
	else {
		CalcSigmaComps();
		for (const auto& cluster_i : unique_clusters_) {
			if (only_grouped_REs_use_woodbury_identity_) {//Use Woodbury matrix inversion formula: used only if there are only grouped REs
				if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
					CalcSigmaIGroupedREsOnly(SigmaI_[cluster_i], cluster_i);
					sqrt_diag_SigmaI_plus_ZtZ_[cluster_i] = (SigmaI_[cluster_i].diagonal().array() + ZtZ_[cluster_i].diagonal().array()).sqrt().matrix();
				}
				else {
					sp_mat_t SigmaI;
					CalcSigmaIGroupedREsOnly(SigmaI, cluster_i);
					T_mat SigmaIplusZtZ = SigmaI + ZtZ_[cluster_i];
					CalcChol<T_mat>(SigmaIplusZtZ, cluster_i);
				}
			}//end only_grouped_REs_use_woodbury_identity_
			else {//not only_grouped_REs_use_woodbury_identity_
				T_mat psi;
				CalcZSigmaZt(psi, cluster_i);
				CalcChol<T_mat>(psi, cluster_i);
			}//end not only_grouped_REs_use_woodbury_identity_
		}
	}
	covariance_matrix_has_been_factorized_ = true;
}
/*!
* \brief Calculate Psi^-1*y (and save in y_aux_)
* \param marg_variance The marginal variance. Default = 1.
*/
void CalcYAux(double marg_variance = 1.) {
	for (const auto& cluster_i : unique_clusters_) {
		if (y_.find(cluster_i) == y_.end()) {
			Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
		}
		if (!covariance_matrix_has_been_factorized_) {
			Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
		}
		if (vecchia_approx_) {
			// Psi^-1 = B^T * D^-1 * B for the Vecchia approximation
			y_aux_[cluster_i] = B_[cluster_i].transpose() * D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];
		}//end vecchia_approx_
		else {//not vecchia_approx_
			if (only_grouped_REs_use_woodbury_identity_) {
				vec_t MInvZty;
				if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
					MInvZty = (Zty_[cluster_i].array() / sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().square()).matrix();
				}
				else {
					MInvZty = chol_facts_solve_[cluster_i].solve(Zty_[cluster_i]);
				}
				y_aux_[cluster_i] = y_[cluster_i] - Zt_[cluster_i].transpose() * MInvZty;
			}
			else {
				//Version 1: let Eigen do the computation
				y_aux_[cluster_i] = chol_facts_solve_[cluster_i].solve(y_[cluster_i]);
				//// Version 2 'do-it-yourself' (for sparse matrices)
				//y_aux_[cluster_i] = y_[cluster_i];
				//const double* val = chol_facts_[cluster_i].valuePtr();
				//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
				//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
				//sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
				//sp_L_t_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
			}
		}//end non-Vecchia
		if (marg_variance != 1.) {
			y_aux_[cluster_i] /= marg_variance;
		}
	}
	y_aux_has_been_calculated_ = true;
}
/*!
* \brief Calculate y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (and save in y_tilde_) if sparse matrices are used
* \param also_calculate_ytilde2 If true y_tilde2 = Z * L^-T * L^-1 * Z^T * y is also calculated
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcYtilde(bool also_calculate_ytilde2 = false) {
	for (const auto& cluster_i : unique_clusters_) {
		if (y_.find(cluster_i) == y_.end()) {
			Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
		}
		if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
			// Diagonal case: L is simply sqrt(SigmaI + ZtZ) element-wise
			y_tilde_[cluster_i] = (Zty_[cluster_i].array() / sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array()).matrix();
			if (also_calculate_ytilde2) {
				y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ((y_tilde_[cluster_i].array() / sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array()).matrix());
			}
		}
		else {
			y_tilde_[cluster_i] = Zty_[cluster_i];
			if (chol_fact_has_permutation_) {//Apply permutation if an ordering is used
				y_tilde_[cluster_i] = chol_facts_solve_[cluster_i].permutationP() * y_tilde_[cluster_i];
			}
			// Triangular solve using the raw CSC arrays of the sparse Cholesky factor
			const double* val = chol_facts_[cluster_i].valuePtr();
			const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
			const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
			sp_L_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], y_tilde_[cluster_i].data());
			if (also_calculate_ytilde2) {
				vec_t ytilde_aux = y_tilde_[cluster_i];
				sp_L_t_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], ytilde_aux.data());
				if (chol_fact_has_permutation_) {//Apply permutation if an ordering is used
					ytilde_aux = chol_facts_solve_[cluster_i].permutationP().transpose() * ytilde_aux;
				}
				y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ytilde_aux;
			}
		}
	}
}
/*!
* \brief Calculate y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (and save in y_tilde_) if dense matrices are used * \param also_calculate_ytilde2 If true y_tilde2 = Z * L^-T * L^-1 * Z^T * y is also calculated */ template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr > void CalcYtilde(bool also_calculate_ytilde2 = false) { for (const auto& cluster_i : unique_clusters_) { if (y_.find(cluster_i) == y_.end()) { Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first."); } if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal y_tilde_[cluster_i] = y_tilde_[cluster_i] = (Zty_[cluster_i].array() / sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array()).matrix(); if (also_calculate_ytilde2) { y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ((y_tilde_[cluster_i].array() / sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array()).matrix()); } } else { y_tilde_[cluster_i] = Zty_[cluster_i]; L_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], y_tilde_[cluster_i].data()); if (also_calculate_ytilde2) { vec_t ytilde_aux = y_tilde_[cluster_i]; L_t_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], ytilde_aux.data()); y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ytilde_aux; } } } } /*! 
* \brief Calculate y^T*Psi^-1*y if sparse matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
* \param all_clusters If true, then y^T*Psi^-1*y is calculated for all clusters / data and cluster_ind is ignored
* \param cluster_ind Cluster index
* \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant for not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy, bool all_clusters, data_size_t cluster_ind, bool CalcYAux_already_done, bool CalcYtilde_already_done) {
	yTPsiInvy = 0;
	// Either iterate over all clusters or only over the single requested cluster
	std::vector<data_size_t> clusters_iterate;
	if (all_clusters) {
		clusters_iterate = unique_clusters_;
	}
	else {
		clusters_iterate = std::vector<data_size_t>(1);
		clusters_iterate[0] = cluster_ind;
	}
	for (const auto& cluster_i : clusters_iterate) {
		if (y_.find(cluster_i) == y_.end()) {
			Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
		}
		if (!covariance_matrix_has_been_factorized_) {
			Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
		}
		if (vecchia_approx_) {
			if (CalcYAux_already_done) {
				yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
			}
			else {
				// y^T*Psi^-1*y = (B*y)^T * D^-1 * (B*y)
				vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
				yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
			}
		}//end vecchia_approx_
		else {//not vecchia_approx_
			if (only_grouped_REs_use_woodbury_identity_) {
				if (!CalcYtilde_already_done) {
					CalcYtilde<T_mat>(false);//y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
				}
				else if ((int)y_tilde_[cluster_i].size() != cum_num_rand_eff_[cluster_i][num_comps_total_]) {
					Log::REFatal("y_tilde = L^-1 * Z^T * y has not the correct number of data points. Call 'CalcYtilde' first.");
				}
				// Woodbury: y^T*Psi^-1*y = y^T*y - y_tilde^T*y_tilde
				yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_tilde_[cluster_i].transpose() * y_tilde_[cluster_i])(0, 0);
			}//end only_grouped_REs_use_woodbury_identity_
			else {//not only_grouped_REs_use_woodbury_identity_
				if (CalcYAux_already_done) {
					yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
				}
				else {
					// Only a forward solve is needed: ||L^-1 * (P *) y||^2 = y^T*Psi^-1*y
					vec_t y_aux_sqrt = y_[cluster_i];
					if (chol_fact_has_permutation_) {//Apply permutation if an ordering is used
						y_aux_sqrt = chol_facts_solve_[cluster_i].permutationP() * y_aux_sqrt;
					}
					const double* val = chol_facts_[cluster_i].valuePtr();
					const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
					const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
					sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
					yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
				}
			}//end not only_grouped_REs_use_woodbury_identity_
		}//end not vecchia_approx_
	}
}//end CalcYTPsiIInvY for sparse matrices
/*!
* \brief Calculate y^T*Psi^-1*y if dense matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
* \param all_clusters If true, then y^T*Psi^-1*y is calculated for all clusters / data and cluster_ind is ignored
* \param cluster_ind Cluster index
* \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant for not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy, bool all_clusters, data_size_t cluster_ind, bool CalcYAux_already_done, bool CalcYtilde_already_done) {
	// NOTE(review): this mirrors the sparse specialization above except for the dense L_solve in the last branch
	yTPsiInvy = 0;
	// Either iterate over all clusters or only over the single requested cluster
	std::vector<data_size_t> clusters_iterate;
	if (all_clusters) {
		clusters_iterate = unique_clusters_;
	}
	else {
		clusters_iterate = std::vector<data_size_t>(1);
		clusters_iterate[0] = cluster_ind;
	}
	for (const auto& cluster_i : clusters_iterate) {
		if (y_.find(cluster_i) == y_.end()) {
			Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
		}
		if (!covariance_matrix_has_been_factorized_) {
			Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
		}
		if (vecchia_approx_) {
			if (CalcYAux_already_done) {
				yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
			}
			else {
				// y^T*Psi^-1*y = (B*y)^T * D^-1 * (B*y)
				vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
				yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
			}
		}//end vecchia_approx_
		else {//not vecchia_approx_
			if (only_grouped_REs_use_woodbury_identity_) {
				if (!CalcYtilde_already_done) {
					CalcYtilde<T_mat>(false);//y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
				}
				else if ((int)y_tilde_[cluster_i].size() != cum_num_rand_eff_[cluster_i][num_comps_total_]) {
					Log::REFatal("y_tilde = L^-1 * Z^T * y has not the correct number of data points. Call 'CalcYtilde' first.");
				}
				// Woodbury: y^T*Psi^-1*y = y^T*y - y_tilde^T*y_tilde
				yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_tilde_[cluster_i].transpose() * y_tilde_[cluster_i])(0, 0);
			}//end only_grouped_REs_use_woodbury_identity_
			else {//not only_grouped_REs_use_woodbury_identity_
				if (CalcYAux_already_done) {
					yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
				}
				else {
					// Only a forward solve is needed: ||L^-1 * y||^2 = y^T*Psi^-1*y (dense factor, no permutation)
					vec_t y_aux_sqrt = y_[cluster_i];
					L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
					yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
				}
			}//end not only_grouped_REs_use_woodbury_identity_
		}//end not vecchia_approx_
	}
}//end CalcYTPsiIInvY for dense matrices
/*!
* \brief Calculate gradient for covariance parameters
* This assumes that the covariance matrix has been factorized (by 'CalcCovFactor') and that y_aux or y_tilde/y_tilde2 (if only_grouped_REs_use_woodbury_identity_) have been calculated (by 'CalcYAux' or 'CalcYtilde')
* \param cov_pars Covariance parameters
* \param[out] cov_grad Gradient w.r.t.
covariance parameters
* \param include_error_var If true, the gradient for the marginal variance parameter (=error, nugget effect) is also calculated, otherwise not (set this to true if the nugget effect is not calculated by using the closed-form solution)
* \param save_psi_inv If true, the inverse covariance matrix Psi^-1 is saved for reuse later (e.g. when calculating the Fisher information in Fisher scoring). This option is ignored if the Vecchia approximation is used.
* \param fixed_effects Fixed effects component of location parameter (used only for non-Gaussian data)
*/
void CalcCovParGrad(vec_t& cov_pars, vec_t& cov_grad, bool include_error_var = false, bool save_psi_inv = false, const double* fixed_effects = nullptr) {
	if (gauss_likelihood_) {//Gaussian data
		if (include_error_var) {
			cov_grad = vec_t::Zero(num_cov_par_);
		}
		else {
			cov_grad = vec_t::Zero(num_cov_par_ - 1);
		}
		// Index offset: slot 0 is the error variance when it is included
		int first_cov_par = include_error_var ? 1 : 0;
		for (const auto& cluster_i : unique_clusters_) {
			if (vecchia_approx_) {//Vecchia approximation
				vec_t u(num_data_per_cluster_[cluster_i]);
				vec_t uk(num_data_per_cluster_[cluster_i]);
				if (include_error_var) {
					u = B_[cluster_i] * y_[cluster_i];
					cov_grad[0] += -1. * ((double)(u.transpose() * D_inv_[cluster_i] * u)) / cov_pars[0] / 2. + num_data_per_cluster_[cluster_i] / 2.;
					u = D_inv_[cluster_i] * u;
				}
				else {
					u = D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];//TODO: this is already calculated in CalcYAux -> save it there and re-use here?
				}
				for (int j = 0; j < num_comps_total_; ++j) {
					int num_par_comp = re_comps_[cluster_i][j]->num_cov_par_;
					for (int ipar = 0; ipar < num_par_comp; ++ipar) {
						uk = B_grad_[cluster_i][num_par_comp * j + ipar] * y_[cluster_i];
						// Quadratic-form term plus trace term of the log-likelihood gradient
						cov_grad[first_cov_par + ind_par_[j] - 1 + ipar] += ((uk.dot(u) - 0.5 * u.dot(D_grad_[cluster_i][num_par_comp * j + ipar] * u)) / cov_pars[0] +
							0.5 * (D_inv_[cluster_i].diagonal()).dot(D_grad_[cluster_i][num_par_comp * j + ipar].diagonal()));
					}
				}
			}//end vecchia_approx_
			else {//not vecchia_approx_
				if (only_grouped_REs_use_woodbury_identity_) {
					if (include_error_var) {
						double yTPsiInvy;
						CalcYTPsiIInvY<T_mat>(yTPsiInvy, false, cluster_i, true, true);
						cov_grad[0] += -1. * yTPsiInvy / cov_pars[0] / 2. + num_data_per_cluster_[cluster_i] / 2.;
					}
					std::vector<T_mat> LInvZtZj_cluster_i;
					if (save_psi_inv) {
						LInvZtZj_[cluster_i].clear();
						LInvZtZj_cluster_i = std::vector<T_mat>(num_comps_total_);
					}
					for (int j = 0; j < num_comps_total_; ++j) {
						sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
						vec_t y_tilde_j = (*Z_j).transpose() * y_[cluster_i];
						vec_t y_tilde2_j = (*Z_j).transpose() * y_tilde2_[cluster_i];
						double yTPsiIGradPsiPsiIy = y_tilde_j.transpose() * y_tilde_j - 2. * (double)(y_tilde_j.transpose() * y_tilde2_j) + y_tilde2_j.transpose() * y_tilde2_j;
						yTPsiIGradPsiPsiIy *= cov_pars[j + 1];
						T_mat LInvZtZj;
						if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
							LInvZtZj = ZtZ_[cluster_i];
							LInvZtZj.diagonal().array() /= sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array();
						}
						else {
							if (chol_fact_has_permutation_) {
								CalcPsiInvSqrtH(P_ZtZj_[cluster_i][j], LInvZtZj, cluster_i, true, false);
							}
							else {
								CalcPsiInvSqrtH(ZtZj_[cluster_i][j], LInvZtZj, cluster_i, true, false);
							}
						}
						if (save_psi_inv) {//save for later use when e.g. calculating the Fisher information
							LInvZtZj_cluster_i[j] = LInvZtZj;
						}
						double trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - LInvZtZj.squaredNorm();
						trace_PsiInvGradPsi *= cov_pars[j + 1];
						cov_grad[first_cov_par + j] += -1. * yTPsiIGradPsiPsiIy / cov_pars[0] / 2. + trace_PsiInvGradPsi / 2.;
					}
					if (save_psi_inv) {
						LInvZtZj_[cluster_i] = LInvZtZj_cluster_i;
					}
				}//end only_grouped_REs_use_woodbury_identity_
				else {//not only_grouped_REs_use_woodbury_identity_
					T_mat psi_inv;
					CalcPsiInv(psi_inv, cluster_i);
					if (save_psi_inv) {//save for later use when e.g. calculating the Fisher information
						psi_inv_[cluster_i] = psi_inv;
					}
					if (include_error_var) {
						cov_grad[0] += -1. * ((double)(y_[cluster_i].transpose() * y_aux_[cluster_i])) / cov_pars[0] / 2. + num_data_per_cluster_[cluster_i] / 2.;
					}
					for (int j = 0; j < num_comps_total_; ++j) {
						for (int ipar = 0; ipar < re_comps_[cluster_i][j]->num_cov_par_; ++ipar) {
							std::shared_ptr<T_mat> gradPsi = re_comps_[cluster_i][j]->GetZSigmaZtGrad(ipar, true, 1.);
							// -0.5 * y_aux^T * dPsi * y_aux / sigma2 + 0.5 * tr(Psi^-1 * dPsi)
							cov_grad[first_cov_par + ind_par_[j] - 1 + ipar] += -1. * ((double)(y_aux_[cluster_i].transpose() * (*gradPsi) * y_aux_[cluster_i])) / cov_pars[0] / 2. +
								((double)(((*gradPsi).cwiseProduct(psi_inv)).sum())) / 2.;
						}
					}
				}//end not only_grouped_REs_use_woodbury_identity_
			}//end not vecchia_approx_
		}// end loop over clusters
	}//end gauss_likelihood_
	else {//not gauss_likelihood_
		if (include_error_var) {
			Log::REFatal("There is no error variance (nugget effect) for non-Gaussian data");
		}
		cov_grad = vec_t::Zero(num_cov_par_);
		vec_t cov_grad_cluster_i(num_cov_par_);
		vec_t empty_unused_vec(0);//placeholder for fixed effects gradient
		const double* fixed_effects_cluster_i_ptr = nullptr;
		vec_t fixed_effects_cluster_i;
		for (const auto& cluster_i : unique_clusters_) {
			//map fixed effects to clusters (if needed)
			vec_t grad_F_cluster_i(num_data_per_cluster_[cluster_i]);
			if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
				fixed_effects_cluster_i_ptr = fixed_effects;
			}
			else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
				fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
#pragma omp parallel for schedule(static)
				for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
					fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
				}
				fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
			}
			// Delegate the Laplace-approximation gradient to the likelihood object; the variant depends on the model structure
			if (vecchia_approx_) {//Vecchia approximation
				likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxVecchia(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr,
					num_data_per_cluster_[cluster_i], B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i],
					true, false, cov_grad_cluster_i.data(), empty_unused_vec, false);
			}//end vecchia_approx_
			else {//not vecchia_approx_
				if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
					likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxGroupedRE(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr,
						num_data_per_cluster_[cluster_i], SigmaI_[cluster_i], Zt_[cluster_i], cum_num_rand_eff_[cluster_i],
						true, false, cov_grad_cluster_i.data(), empty_unused_vec, false);
				}//end only_grouped_REs_use_woodbury_identity_
				else if (only_one_grouped_RE_calculations_on_RE_scale_) {
					likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr,
						num_data_per_cluster_[cluster_i], re_comps_[cluster_i][0]->cov_pars_[0], re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
						true, false, cov_grad_cluster_i.data(), empty_unused_vec, false);
				}
				else if (only_one_GP_calculations_on_RE_scale_) {
					likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr,
						num_data_per_cluster_[cluster_i], ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
						re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(), re_comps_[cluster_i],
						true, false, cov_grad_cluster_i.data(), empty_unused_vec, false);
				}
				else {//not only_grouped_REs_use_woodbury_identity_ and not only_one_GP_calculations_on_RE_scale_
					likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxStable(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr,
						num_data_per_cluster_[cluster_i], ZSigmaZt_[cluster_i], re_comps_[cluster_i],
						true, false, cov_grad_cluster_i.data(), empty_unused_vec, false);
				}//end not only_grouped_REs_use_woodbury_identity_
			}//end not vecchia_approx_
			cov_grad += cov_grad_cluster_i;
		}// end loop over clusters
	}//end not gauss_likelihood_
}//end CalcCovParGrad
/*!
* \brief Apply a momentum step
* \param it Iteration number
* \param pars Parameters
* \param pars_lag1 Parameters from last iteration
* \param[out] pars_acc Accelerated parameters
* \param nesterov_acc_rate Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param exclude_first_log_scale If true, no momentum is applied to the first value and the momentum step is done on the log-scale for the other values. Default = true
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param log_scale If true, the momentum step is done on the log-scale
*/
void ApplyMomentumStep(int it, vec_t& pars, vec_t& pars_lag1, vec_t& pars_acc, double nesterov_acc_rate = 0.5,
	int nesterov_schedule_version = 0, bool exclude_first_log_scale = true, int momentum_offset = 2, bool log_scale = false) {
	double mu = NesterovSchedule(it, nesterov_schedule_version, nesterov_acc_rate, momentum_offset);
	int num_par = (int)pars.size();
	if (exclude_first_log_scale) {
		// First parameter (e.g. error variance) is passed through unchanged
		pars_acc[0] = pars[0];
		pars_acc.segment(1, num_par - 1) = ((mu + 1.) * (pars.segment(1, num_par - 1).array().log()) - mu * (pars_lag1.segment(1, num_par - 1).array().log())).exp().matrix();//Momentum is added on the log scale
	}
	else {
		if (log_scale) {
			pars_acc = ((mu + 1.) * (pars.array().log()) - mu * (pars_lag1.array().log())).exp().matrix();
		}
		else {
			pars_acc = (mu + 1) * pars - mu * pars_lag1;
		}
	}
}
/*!
* \brief Calculate gradient for linear fixed-effect coefficients
* \param marg_var Marginal variance parameters sigma^2 (only used for Gaussian data)
* \param beta Linear regression coefficients
* \param[out] grad_beta Gradient for linear regression coefficients
* \param fixed_effects Fixed effects component of location parameter for observed data (only used for non-Gaussian data)
*/
void CalcLinCoefGrad(double marg_var, const vec_t beta, vec_t& grad_beta, const double* fixed_effects = nullptr) {
	if (gauss_likelihood_) {
		// Gaussian: gradient is -X^T * Psi^-1 * (y - X*beta) / sigma2
		const vec_t resid = y_vec_ - (X_ * beta);
		SetY(resid.data());
		CalcYAux();
		vec_t y_aux(num_data_);
		GetYAux(y_aux);
		grad_beta = (-1. / marg_var) * (X_.transpose()) * y_aux;
	}
	else {
		// Non-Gaussian: chain rule through the Laplace-approximated likelihood
		vec_t grad_F(num_data_);
		CalcGradFLaplace(grad_F.data(), fixed_effects);
		grad_beta = (X_.transpose()) * grad_F;
	}
}
/*!
* \brief Update linear fixed-effect coefficients using generalized least squares (GLS)
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGLS(den_mat_t& X, vec_t& beta) {
	// beta = (X^T * Psi^-1 * X)^-1 * X^T * Psi^-1 * y
	vec_t y_aux(num_data_);
	GetYAux(y_aux);
	den_mat_t XT_psi_inv_X;
	CalcXTPsiInvX(X, XT_psi_inv_X);
	beta = XT_psi_inv_X.llt().solve(X.transpose() * y_aux);
}
/*!
* \brief Calculate the Fisher information for covariance parameters. Note: you need to call CalcCovFactor first
* \param cov_pars Covariance parameters
* \param[out] FI Fisher information
* \param transf_scale If true, the derivative is taken on the transformed scale otherwise on the original scale.
Default = true
* \param include_error_var If true, the marginal variance parameter is also included, otherwise not
* \param use_saved_psi_inv If false, the inverse covariance matrix Psi^-1 is calculated, otherwise a saved version is used
*/
void CalcFisherInformation(const vec_t& cov_pars, den_mat_t& FI, bool transf_scale = true,
	bool include_error_var = false, bool use_saved_psi_inv = false) {
	if (include_error_var) {
		FI = den_mat_t(num_cov_par_, num_cov_par_);
	}
	else {
		FI = den_mat_t(num_cov_par_ - 1, num_cov_par_ - 1);
	}
	FI.setZero();
	// Index offset: row/column 0 is the error variance when it is included
	int start_cov_pars = include_error_var ? 1 : 0;
	for (const auto& cluster_i : unique_clusters_) {
		if (vecchia_approx_) {
			//Note: if transf_scale==false, then all matrices and derivatives have been calculated on the original scale for the Vecchia approximation, that is why there is no adjustment here
			//Calculate auxiliary matrices for use below
			sp_mat_t Identity(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
			Identity.setIdentity();
			sp_mat_t B_inv;
			eigen_sp_Lower_sp_RHS_solve(B_[cluster_i], Identity, B_inv, true);//No noticeable difference in (n=500, nn=100/30) compared to using eigen_sp_Lower_sp_RHS_cs_solve()
			//eigen_sp_Lower_sp_RHS_cs_solve(B_[cluster_i], Identity, B_inv, true);
			// D = inverse of D_inv_, D_inv_2 = D_inv_ squared (both diagonal)
			sp_mat_t D = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
			D.setIdentity();
			D.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(-1);
			sp_mat_t D_inv_2 = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
			D_inv_2.setIdentity();
			D_inv_2.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(2);
			//Calculate derivative(B) * B^-1
			std::vector<sp_mat_t> B_grad_B_inv(num_cov_par_ - 1);
			for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
				B_grad_B_inv[par_nb] = B_grad_[cluster_i][par_nb] * B_inv;
			}
			//Calculate Fisher information
			sp_mat_t D_inv_B_grad_B_inv, B_grad_B_inv_D;
			if (include_error_var) {
				//First calculate terms for nugget effect / noise variance parameter
				if (transf_scale) {//Optimization is done on transformed scale (in particular, log-scale)
					//The derivative for the nugget variance on the log scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
					FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
					for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
						FI(0, par_nb + 1) += (double)((D_inv_[cluster_i].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum()) / 2.;
					}
				}
				else {//Original scale for asymptotic covariance matrix
					int ind_grad_nugget = num_cov_par_ - 1;
					D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][ind_grad_nugget] * B_inv;
					B_grad_B_inv_D = B_grad_[cluster_i][ind_grad_nugget] * B_inv * D;
					double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array()).sum());
					FI(0, 0) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
					for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
						B_grad_B_inv_D = B_grad_B_inv[par_nb] * D;
						diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum());
						FI(0, par_nb + 1) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
					}
				}
			}
			//Remaining covariance parameters (only the upper triangle is filled; mirrored below)
			for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
				D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_B_inv[par_nb];
				for (int par_nb_cross = par_nb; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
					B_grad_B_inv_D = B_grad_B_inv[par_nb_cross] * D;
					double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array() * D_grad_[cluster_i][par_nb_cross].diagonal().array()).sum());
					FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
				}
			}
		}//end vecchia_approx_
		else {//not vecchia_approx_
			if (only_grouped_REs_use_woodbury_identity_) {
				//Notation used below: M = Sigma^-1 + ZtZ, Sigma = cov(b) b=latent random effects, L=chol(M) i.e. M=LLt, MInv = M^-1 = L^-TL^-1
				if (!use_saved_psi_inv) {
					LInvZtZj_[cluster_i] = std::vector<T_mat>(num_comps_total_);
					if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
						LInvZtZj_[cluster_i][0] = ZtZ_[cluster_i];
						LInvZtZj_[cluster_i][0].diagonal().array() /= sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array();
					}
					else {
						for (int j = 0; j < num_comps_total_; ++j) {
							if (chol_fact_has_permutation_) {
								CalcPsiInvSqrtH(P_ZtZj_[cluster_i][j], LInvZtZj_[cluster_i][j], cluster_i, true, false);
							}
							else {
								CalcPsiInvSqrtH(ZtZj_[cluster_i][j], LInvZtZj_[cluster_i][j], cluster_i, true, false);
							}
						}
					}
				}
				if (include_error_var) {
					if (transf_scale) {//Optimization is done on transformed scale (error variance factored out and log-scale)
						//The derivative for the nugget variance on the transformed scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
						FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
						for (int j = 0; j < num_comps_total_; ++j) {
							double trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - LInvZtZj_[cluster_i][j].squaredNorm();
							FI(0, j + 1) += trace_PsiInvGradPsi * cov_pars[j + 1] / 2.;
						}
					}//end transf_scale
					else {//not transf_scale
						T_mat MInv_ZtZ;//=(Sigma_inv + ZtZ)^-1 * ZtZ
						if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
							MInv_ZtZ = T_mat(ZtZ_[cluster_i].rows(), ZtZ_[cluster_i].cols());
							MInv_ZtZ.setIdentity();//initialize
							MInv_ZtZ.diagonal().array() = ZtZ_[cluster_i].diagonal().array() / (sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().square());
						}
						else {
							T_mat ZtZ = T_mat(ZtZ_[cluster_i]);//TODO: this step is not needed for sparse matrices (i.e. copying is not required)
							MInv_ZtZ = chol_facts_solve_[cluster_i].solve(ZtZ);
						}
						T_mat MInv_ZtZ_t = MInv_ZtZ.transpose();//TODO: possible without saving MInv_ZtZ.transpose()? -> compiler problem in MInv_ZtZ.cwiseProduct(MInv_ZtZ.transpose())
						FI(0, 0) += (num_data_per_cluster_[cluster_i] - 2. * MInv_ZtZ.diagonal().sum() + (double)(MInv_ZtZ.cwiseProduct(MInv_ZtZ_t)).sum()) / (cov_pars[0] * cov_pars[0] * 2.);
						for (int j = 0; j < num_comps_total_; ++j) {
							T_mat ZjZ_MInv_ZtZ_t = MInv_ZtZ_t * ZtZj_[cluster_i][j];
							T_mat ZtZj = T_mat(ZtZj_[cluster_i][j]);
							double trace_PsiInvGradPsi;
							if (num_comps_total_ > 1) {
								T_mat MInv_ZtZj = chol_facts_solve_[cluster_i].solve(ZtZj);
								trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - 2. * (double)(LInvZtZj_[cluster_i][j].squaredNorm()) + (double)(ZjZ_MInv_ZtZ_t.cwiseProduct(MInv_ZtZj)).sum();
							}
							else {
								trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - 2. * (double)(LInvZtZj_[cluster_i][j].squaredNorm()) + (double)(ZjZ_MInv_ZtZ_t.cwiseProduct(MInv_ZtZ)).sum();
							}
							FI(0, j + 1) += trace_PsiInvGradPsi / (cov_pars[0] * cov_pars[0] * 2.);
						}
					}//end not transf_scale
				}//end include_error_var
				//Remaining covariance parameters (upper triangle)
				for (int j = 0; j < num_comps_total_; ++j) {
					sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
					for (int k = j; k < num_comps_total_; ++k) {
						sp_mat_t* Z_k = re_comps_[cluster_i][k]->GetZ();
						sp_mat_t Zjt_Zk = (*Z_j).transpose() * (*Z_k);
						T_mat LInvZtZj_t_LInvZtZk = LInvZtZj_[cluster_i][j].transpose() * LInvZtZj_[cluster_i][k];
						double FI_jk = Zjt_Zk.squaredNorm() + LInvZtZj_t_LInvZtZk.squaredNorm() - 2. * (double)(Zjt_Zk.cwiseProduct(LInvZtZj_t_LInvZtZk)).sum();
						if (transf_scale) {
							FI_jk *= cov_pars[j + 1] * cov_pars[k + 1];
						}
						else {
							FI_jk /= cov_pars[0] * cov_pars[0];
						}
						FI(j + start_cov_pars, k + start_cov_pars) += FI_jk / 2.;
					}
				}
			}//end only_grouped_REs_use_woodbury_identity_
			else {//not only_grouped_REs_use_woodbury_identity_
				T_mat psi_inv;
				if (use_saved_psi_inv) {
					psi_inv = psi_inv_[cluster_i];
				}
				else {
					CalcPsiInv(psi_inv, cluster_i);
				}
				if (!transf_scale) {
					psi_inv /= cov_pars[0];//psi_inv has been calculated with a transformed parametrization, so we need to divide everything by cov_pars[0] to obtain the covariance matrix
				}
				//Calculate Psi^-1 * derivative(Psi)
				std::vector<T_mat> psi_inv_deriv_psi(num_cov_par_ - 1);
				int deriv_par_nb = 0;
				for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
					for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
						psi_inv_deriv_psi[deriv_par_nb] = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
						deriv_par_nb++;
					}
				}
				//Calculate Fisher information
				if (include_error_var) {
					//First calculate terms for nugget effect / noise variance parameter
					if (transf_scale) {//Optimization is done on transformed scale (error variance factored out and log-scale)
						//The derivative for the nugget variance on the transformed scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
						FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
						for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
							FI(0, par_nb + 1) += psi_inv_deriv_psi[par_nb].diagonal().sum() / 2.;
						}
					}
					else {//Original scale for asymptotic covariance matrix
						//The derivative for the nugget variance is the identity matrix, i.e. psi_inv_grad_psi_sigma2 = psi_inv.
						FI(0, 0) += ((double)(psi_inv.cwiseProduct(psi_inv)).sum()) / 2.;
						for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
							FI(0, par_nb + 1) += ((double)(psi_inv.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
						}
					}
				}
				//Remaining covariance parameters (upper triangle)
				for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
					T_mat psi_inv_grad_psi_par_nb_T = psi_inv_deriv_psi[par_nb].transpose();
					FI(par_nb + start_cov_pars, par_nb + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
					for (int par_nb_cross = par_nb + 1; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
						FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb_cross])).sum()) / 2.;
					}
					psi_inv_deriv_psi[par_nb].resize(0, 0);//not needed anymore
					psi_inv_grad_psi_par_nb_T.resize(0, 0);
				}
			}//end not only_grouped_REs_use_woodbury_identity_
		}//end not vecchia_approx_
	}//end loop over clusters
	// Mirror the upper triangle to make FI symmetric
	FI.triangularView<Eigen::StrictlyLower>() = FI.triangularView<Eigen::StrictlyUpper>().transpose();
	//for (int i = 0; i < std::min((int)FI.rows(),4); ++i) {//For debugging only
	//	for (int j = i; j < std::min((int)FI.cols(),4); ++j) {
	//		Log::REInfo("FI(%d,%d) %g", i, j, FI(i, j));
	//	}
	//}
}
/*!
* \brief Calculate the standard deviations for the MLE of the covariance parameters as the diagonal of the inverse Fisher information (on the original scale and not the transformed scale used in the optimization, for Gaussian data only)
* \param cov_pars MLE of covariance parameters
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCovPar(const vec_t& cov_pars, vec_t& std_dev) {
	SetCovParsComps(cov_pars);
	CalcCovFactor(true, false, cov_pars[0], true);
	den_mat_t FI;
	// Fisher information on the original scale (transf_scale=false), including the error variance
	CalcFisherInformation(cov_pars, FI, false, true, false);
	std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
/*!
* \brief Calculate standard deviations for the MLE of the regression coefficients as the diagonal of the inverse Fisher information (for Gaussian data only) * \param cov_pars MLE of covariance parameters * \param X Covariate data for linear fixed-effect * \param[out] std_dev Standard deviations */ void CalcStdDevCoef(vec_t& cov_pars, const den_mat_t& X, vec_t& std_dev) { if ((int)std_dev.size() >= num_data_) { Log::REWarning("Sample size too small to calculate standard deviations for coefficients"); for (int i = 0; i < (int)std_dev.size(); ++i) { std_dev[i] = std::numeric_limits<double>::quiet_NaN(); } } else { SetCovParsComps(cov_pars); CalcCovFactor(false, true, 1., false); den_mat_t FI((int)X.cols(), (int)X.cols()); CalcXTPsiInvX(X, FI); FI /= cov_pars[0]; std_dev = FI.inverse().diagonal().array().sqrt().matrix(); } } /*! * \brief Calculate standard deviations for the MLE of the regression coefficients as the diagonal of the inverse Fisher information * \param num_covariates Number of covariates / coefficients * \param beta Regression coefficients * \param cov_pars Covariance parameters * \param fixed_effects Externally provided fixed effects component of location parameter * \param[out] std_dev_beta Standard deviations */ void CalcStdDevCoefNonGaussian(int num_covariates, const vec_t& beta, const vec_t& cov_pars, const double* fixed_effects, vec_t& std_dev_beta) { den_mat_t H(num_covariates, num_covariates);// Aproximate Hessian calculated as the Jacobian of the gradient const double mach_eps = std::numeric_limits<double>::epsilon(); vec_t delta_step = beta * std::pow(mach_eps, 1.0 / 3.0);// based on https://math.stackexchange.com/questions/1039428/finite-difference-method vec_t fixed_effects_vec, beta_change1, beta_change2, grad_beta_change1, grad_beta_change2; for (int i = 0; i < num_covariates; ++i) { // Beta plus / minus delta beta_change1 = beta; beta_change2 = beta; beta_change1[i] += delta_step[i]; beta_change2[i] -= delta_step[i]; // Gradient vector at 
beta plus / minus delta UpdateFixedEffects(beta_change1, fixed_effects, fixed_effects_vec); CalcCovFactorOrModeAndNegLL(cov_pars, fixed_effects_vec.data()); CalcLinCoefGrad(1., beta_change1, grad_beta_change1, fixed_effects_vec.data()); UpdateFixedEffects(beta_change2, fixed_effects, fixed_effects_vec); CalcCovFactorOrModeAndNegLL(cov_pars, fixed_effects_vec.data()); CalcLinCoefGrad(1., beta_change2, grad_beta_change2, fixed_effects_vec.data()); // Approximate gradient of gradient H.row(i) = (grad_beta_change1 - grad_beta_change2) / (2. * delta_step[i]); } den_mat_t Hsym = (H + H.transpose()) / 2.; // (Very) approximate standard deviations as square root of diagonal of inverse Hessian std_dev_beta = Hsym.inverse().diagonal().array().sqrt().matrix(); } /*! * \brief Find minimum for paramters using an external optimization library (cppoptlib) * \param cov_pars[out] Covariance parameters (initial values and output written on it) * \param beta[out] Linear regression coefficients (if there are any) (initial values and output written on it) * \param fixed_effects Externally provided fixed effects component of location parameter (only used for non-Gaussian data) * \param max_iter Maximal number of iterations * \param delta_rel_conv Convergence criterion: stop iteration if relative change in in parameters is below this value * \param num_it[out] Number of iterations * \param learn_covariance_parameters If true, covariance parameters are estimated */ void OptimExternal(vec_t& cov_pars, vec_t& beta, const double* fixed_effects, int max_iter, double delta_rel_conv, int& num_it, bool learn_covariance_parameters) { // Some checks CHECK(num_cov_par_ == (int)cov_pars.size()); if (has_covariates_) { CHECK(beta.size() == X_.cols()); } // Determine number of covariance and linear regression coefficient paramters int num_cov_pars_optim, num_covariates; if (learn_covariance_parameters) { if (gauss_likelihood_) { num_cov_pars_optim = num_cov_par_ - 1; } else { num_cov_pars_optim = 
num_cov_par_;
        }
    }
    else {
        num_cov_pars_optim = 0;
    }
    if (has_covariates_) {
        num_covariates = (int)beta.size();
    }
    else {
        num_covariates = 0;
    }
    // Initialization of parameters: [log-covariance-parameters, coefficients]
    vec_t pars_init;
    if (gauss_likelihood_) {
        pars_init = vec_t(num_cov_pars_optim + num_covariates);
        if (learn_covariance_parameters) {
            pars_init.segment(0, num_cov_pars_optim) = cov_pars.segment(1, num_cov_pars_optim).array().log().matrix();//exclude nugget and transform to log-scale
        }
        if (has_covariates_) {
            pars_init.segment(num_cov_pars_optim, num_covariates) = beta;//regression coefficients
        }
    }//end gauss_likelihood_
    else {//non-Gaussian data
        pars_init = vec_t(num_cov_pars_optim + num_covariates);
        if (learn_covariance_parameters) {
            pars_init.segment(0, num_cov_pars_optim) = cov_pars.array().log().matrix();//transform to log-scale
        }
        if (has_covariates_) {
            pars_init.segment(num_cov_pars_optim, num_covariates) = beta;//regression coefficients
        }
    }
    //Do optimization
    OptDataOptimLib<T_mat, T_chol> opt_data = OptDataOptimLib<T_mat, T_chol>(this, fixed_effects, learn_covariance_parameters, cov_pars);
    optim::algo_settings_t settings;
    settings.iter_max = max_iter;
    settings.rel_objfn_change_tol = delta_rel_conv;
    optim::nm(pars_init, EvalLLforOptimLib<T_mat, T_chol>, &opt_data, settings);//Nelder-Mead; the optimum is written back into pars_init
    num_it = (int)settings.opt_iter;
    neg_log_likelihood_ = settings.opt_fn_value;
    // Transform parameters back for export
    if (gauss_likelihood_) {
        if (learn_covariance_parameters) {
            cov_pars[0] = sigma2_;
            cov_pars.segment(1, num_cov_pars_optim) = pars_init.segment(0, num_cov_pars_optim).array().exp().matrix();//back-transform to original scale
        }
        if (has_covariates_) {
            beta = pars_init.segment(num_cov_pars_optim, num_covariates);
        }
    }//end gauss_likelihood_
    else {//non-Gaussian data
        if (learn_covariance_parameters) {
            cov_pars = pars_init.segment(0, num_cov_pars_optim).array().exp().matrix();//back-transform to original scale
        }
        if (has_covariates_) {
            beta = pars_init.segment(num_cov_pars_optim, num_covariates);
        }
    }
}//end OptimExternal

/*!
* \brief Calculate predictions (conditional mean and covariance matrix) for one cluster
* \param cluster_i Cluster index for which prediction are made
* \param num_data_pred Total number of prediction locations (over all clusters)
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to every cluster
* \param re_group_levels_pred Group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
* \param re_group_rand_coef_data_pred Random coefficient data for grouped REs
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the predictive/conditional covariance matrix is calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param predict_var If true, the predictive/conditional variances are calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param[out] mean_pred_id Predictive mean
* \param[out] cov_mat_pred_id Predictive covariance matrix
* \param[out] var_pred_id Predictive variances
*/
void CalcPred(data_size_t cluster_i,
    int num_data_pred,
    std::map<data_size_t, int>& num_data_per_cluster_pred,
    std::map<data_size_t, std::vector<int>>& data_indices_per_cluster_pred,
    const std::vector<std::vector<re_group_t>>& re_group_levels_pred,
    const double* re_group_rand_coef_data_pred,
    const den_mat_t& gp_coords_mat_pred,
    const double* gp_rand_coef_data_pred,
    bool predict_cov_mat,
    bool predict_var,
    vec_t& mean_pred_id,
    T_mat& cov_mat_pred_id,
    vec_t& var_pred_id) {
    // Determine the dimensions on which predictions are done (random-effects scale or data scale)
    int num_REs_obs, num_REs_pred;
    if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
num_REs_pred = (int)re_group_levels_pred[0].size(); num_REs_obs = re_comps_[cluster_i][0]->GetNumUniqueREs(); } else if (only_one_GP_calculations_on_RE_scale_) { num_REs_pred = (int)gp_coords_mat_pred.rows(); num_REs_obs = re_comps_[cluster_i][0]->GetNumUniqueREs(); } else { num_REs_pred = num_data_per_cluster_pred[cluster_i]; num_REs_obs = num_data_per_cluster_[cluster_i]; } if (predict_var) { if (gauss_likelihood_) { var_pred_id = vec_t::Ones(num_REs_pred);//nugget effect } else { var_pred_id = vec_t::Zero(num_REs_pred); } } if (predict_cov_mat) { cov_mat_pred_id = T_mat(num_REs_pred, num_REs_pred); if (gauss_likelihood_) { cov_mat_pred_id.setIdentity();//nugget effect } else { cov_mat_pred_id.setZero(); } } T_mat cross_cov(num_REs_pred, num_REs_obs);//Cross covariance between prediction and observation points //Calculate covariance matrices int cn = 0;//component number bool dont_add_but_overwrite = true; //Grouped random effects if (num_re_group_ > 0) { if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) { std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]); re_comp->AddPredCovMatrices(re_group_levels_pred[0], cross_cov, cov_mat_pred_id, predict_cov_mat, dont_add_but_overwrite, true, nullptr); dont_add_but_overwrite = false; if (predict_var) { re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr); } } else { for (int j = 0; j < num_re_group_; ++j) { std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]); std::vector<re_group_t> group_data; for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { group_data.push_back(re_group_levels_pred[j][id]); } re_comp->AddPredCovMatrices(group_data, cross_cov, cov_mat_pred_id, predict_cov_mat, dont_add_but_overwrite, false, nullptr); dont_add_but_overwrite = false; if (predict_var) { 
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr); } cn += 1; } if (num_re_group_rand_coef_ > 0) { //Random coefficient grouped random effects for (int j = 0; j < num_re_group_rand_coef_; ++j) { std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]); std::vector<re_group_t> group_data; std::vector<double> rand_coef_data; for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { rand_coef_data.push_back(re_group_rand_coef_data_pred[j * num_data_pred + id]); group_data.push_back(re_group_levels_pred[ind_effect_group_rand_coef_[j] - 1][id]);//subtract 1 since counting starts at one for this index } re_comp->AddPredCovMatrices(group_data, cross_cov, cov_mat_pred_id, predict_cov_mat, false, false, rand_coef_data.data()); if (predict_var) { re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, rand_coef_data.data()); } cn += 1; } } } }//end grouped random effects //Gaussian process if (num_gp_ > 0) { std::shared_ptr<RECompGP<T_mat>> re_comp_base = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][cn]); re_comp_base->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, cross_cov, cov_mat_pred_id, predict_cov_mat, dont_add_but_overwrite, nullptr); dont_add_but_overwrite = false; if (predict_var) { re_comp_base->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr); } cn += 1; if (num_gp_rand_coef_ > 0) { std::shared_ptr<RECompGP<T_mat>> re_comp; //Random coefficient Gaussian processes for (int j = 0; j < num_gp_rand_coef_; ++j) { re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][cn]); std::vector<double> rand_coef_data; for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]); } re_comp->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, cross_cov, cov_mat_pred_id, predict_cov_mat, false, rand_coef_data.data()); if (predict_var) { 
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, rand_coef_data.data()); } cn += 1; } } } // Calculate predictive means and covariances if (gauss_likelihood_) {//Gaussian data if (only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) { vec_t Zt_y_aux = vec_t::Zero(num_REs_obs); #pragma omp parallel { vec_t Zt_y_aux_private = vec_t::Zero(num_REs_obs); #pragma omp for for (data_size_t i = 0; i < num_data_per_cluster_[cluster_i]; ++i) { Zt_y_aux_private[re_comps_[cluster_i][0]->random_effects_indices_of_data_[i]] += y_aux_[cluster_i][i]; } #pragma omp critical { for (data_size_t i_re = 0; i_re < num_REs_obs; ++i_re) { Zt_y_aux[i_re] += Zt_y_aux_private[i_re]; } }//end omp critical }//end omp parallel mean_pred_id = cross_cov * Zt_y_aux; }//end only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ else { mean_pred_id = cross_cov * y_aux_[cluster_i]; } if ((predict_cov_mat || predict_var) && only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) { sp_mat_t* Z = re_comps_[cluster_i][0]->GetZ(); T_mat cross_cov_temp = cross_cov; cross_cov = cross_cov_temp * (*Z).transpose(); cross_cov_temp.resize(0, 0); //TODO (low-prio): things could be done more efficiently (using random_effects_indices_of_data_) as ZtZ_ is diagonal } if (predict_cov_mat) { if (only_grouped_REs_use_woodbury_identity_) { T_mat ZtM_aux = T_mat(Zt_[cluster_i] * cross_cov.transpose()); if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal ZtM_aux = sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().inverse().matrix().asDiagonal() * ZtM_aux; cov_mat_pred_id -= (cross_cov * T_mat(cross_cov.transpose()) - ZtM_aux.transpose() * ZtM_aux); } else { cov_mat_pred_id -= (cross_cov * T_mat(cross_cov.transpose()) - ZtM_aux.transpose() * chol_facts_solve_[cluster_i].solve(ZtM_aux)); } } else { cov_mat_pred_id -= (cross_cov * (chol_facts_solve_[cluster_i].solve(T_mat(cross_cov.transpose())))); } }//end predict_cov_mat if (predict_var) { T_mat 
M_aux2; if (only_grouped_REs_use_woodbury_identity_) { T_mat ZtM_aux = T_mat(Zt_[cluster_i] * cross_cov.transpose()); if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal M_aux2 = sqrt_diag_SigmaI_plus_ZtZ_[cluster_i].array().inverse().matrix().asDiagonal() * ZtM_aux; } else { ApplyPermutationCholeskyFactor<T_mat>(ZtM_aux, cluster_i); CalcLInvH(chol_facts_[cluster_i], ZtM_aux, M_aux2, true); } M_aux2 = M_aux2.cwiseProduct(M_aux2); cross_cov = cross_cov.cwiseProduct(cross_cov); #pragma omp parallel for schedule(static) for (int i = 0; i < num_REs_pred; ++i) { var_pred_id[i] -= cross_cov.row(i).sum() - M_aux2.col(i).sum(); } }//end only_grouped_REs_use_woodbury_identity_ else {//not only_grouped_REs_use_woodbury_identity_ T_mat M_auxT = cross_cov.transpose(); ApplyPermutationCholeskyFactor<T_mat>(M_auxT, cluster_i); CalcLInvH(chol_facts_[cluster_i], M_auxT, M_aux2, true); M_aux2 = M_aux2.cwiseProduct(M_aux2); #pragma omp parallel for schedule(static) for (int i = 0; i < num_REs_pred; ++i) { var_pred_id[i] -= M_aux2.col(i).sum(); } }//end not only_grouped_REs_use_woodbury_identity_ }//end predict_var }//end gauss_likelihood_ if (!gauss_likelihood_) {//not gauss_likelihood_ const double* fixed_effects_cluster_i_ptr = nullptr; // Note that fixed_effects_cluster_i_ptr is not used since calc_mode == false // The mode has been calculated already before in the Predict() function above if (vecchia_approx_) { likelihood_[cluster_i]->PredictLAApproxVecchia(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i], B_[cluster_i], D_inv_[cluster_i], cross_cov, mean_pred_id, cov_mat_pred_id, var_pred_id, predict_cov_mat, predict_var, false); } else { if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) { likelihood_[cluster_i]->PredictLAApproxGroupedRE(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, 
num_data_per_cluster_[cluster_i], SigmaI_[cluster_i], Zt_[cluster_i], cross_cov, mean_pred_id, cov_mat_pred_id, var_pred_id, predict_cov_mat, predict_var, false); } else if (only_one_grouped_RE_calculations_on_RE_scale_) { likelihood_[cluster_i]->PredictLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i], re_comps_[cluster_i][0]->cov_pars_[0], re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(), cross_cov, mean_pred_id, cov_mat_pred_id, var_pred_id, predict_cov_mat, predict_var, false); } else if (only_one_GP_calculations_on_RE_scale_) { likelihood_[cluster_i]->PredictLAApproxOnlyOneGPCalculationsOnREScale(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i], ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(), cross_cov, mean_pred_id, cov_mat_pred_id, var_pred_id, predict_cov_mat, predict_var, false); } else { likelihood_[cluster_i]->PredictLAApproxStable(y_[cluster_i].data(), y_int_[cluster_i].data(), fixed_effects_cluster_i_ptr, num_data_per_cluster_[cluster_i], ZSigmaZt_[cluster_i], cross_cov, mean_pred_id, cov_mat_pred_id, var_pred_id, predict_cov_mat, predict_var, false); } } }//end not gauss_likelihood_ }//end CalcPred /*! 
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when observed locations appear first in the ordering * \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data * \param cluster_i Cluster index for which prediction are made * \param num_data_pred Total number of prediction locations (over all clusters) * \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization * \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster * \param gp_coords_mat_obs Coordinates for observed locations * \param gp_coords_mat_pred Coordinates for prediction locations * \param gp_rand_coef_data_pred Random coefficient data for GPs * \param predict_cov_mat If true, the covariance matrix is also calculated * \param[out] mean_pred_id Predicted mean * \param[out] cov_mat_pred_id Predicted covariance matrix */ void CalcPredVecchiaObservedFirstOrder(bool CondObsOnly, data_size_t cluster_i, int num_data_pred, std::map<data_size_t, int>& num_data_per_cluster_pred, std::map<data_size_t, std::vector<int>>& data_indices_per_cluster_pred, const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred, bool predict_cov_mat, vec_t& mean_pred_id, T_mat& cov_mat_pred_id) { int num_data_cli = num_data_per_cluster_[cluster_i]; int num_data_pred_cli = num_data_per_cluster_pred[cluster_i]; //Find nearest neighbors den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_); coords_all << gp_coords_mat_obs, gp_coords_mat_pred; std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_pred_cli); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_pred_cli); std::vector<den_mat_t> 
dist_between_neighbors_cluster_i(num_data_pred_cli);
    if (CondObsOnly) {
        find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
            nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, num_data_cli - 1);
    }
    else {//find neighbors among both the observed and prediction locations
        find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
            nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, -1);
    }
    //Random coefficients
    std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_pred_cli);
    if (num_gp_rand_coef_ > 0) {
        for (int j = 0; j < num_gp_rand_coef_; ++j) {
            std::vector<double> rand_coef_data = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_;//First entries are the observed data, then the predicted data
            for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {//TODO: maybe do the following in parallel? (see CalcPredVecchiaPredictedFirstOrder)
                rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
            }
            // NOTE(review): this '#pragma omp for' has no enclosing parallel region and thus runs sequentially - confirm whether '#pragma omp parallel for' was intended
#pragma omp for schedule(static)
            for (int i = 0; i < num_data_pred_cli; ++i) {
                if (j == 0) {
                    z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
                }
                int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
                vec_t coef_vec(dim_z);
                coef_vec(0) = rand_coef_data[num_data_cli + i];
                if ((num_data_cli + i) > 0) {
                    for (int ii = 1; ii < dim_z; ++ii) {
                        coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
                    }
                }
                z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
            }
        }
    }
    // Determine Triplet for initializing Bpo and Bp
    std::vector<Triplet_t> entries_init_Bpo, entries_init_Bp;
    for (int i = 0; i < num_data_pred_cli; ++i) {
        entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
        for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
            if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
                entries_init_Bpo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
            }
            else {//nearest neighbor belongs to predicted data
                entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli, 0.));
            }
        }
    }
    sp_mat_t Bpo(num_data_pred_cli, num_data_cli);
    sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
    Bpo.setFromTriplets(entries_init_Bpo.begin(), entries_init_Bpo.end());//initialize matrices (in order that the code below can be run in parallel)
    Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
    sp_mat_t Dp(num_data_pred_cli, num_data_pred_cli);
    Dp.setIdentity();//Put 1 on the diagonal (for nugget effect)
#pragma omp parallel for schedule(static)
    for (int i = 0; i < num_data_pred_cli; ++i) {
        int num_nn = (int)nearest_neighbors_cluster_i[i].size();
        //define covariance and gradient matrices
        den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
        den_mat_t
cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
        den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
        for (int j = 0; j < num_gp_total_; ++j) {
            if (j == 0) {
                re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
                    cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
                re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
                    cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
            }
            else {//random coefficient GPs
                den_mat_t cov_mat_obs_neighbors_j;
                den_mat_t cov_mat_between_neighbors_j;
                re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
                    cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
                re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
                    cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
                //multiply by coefficient matrix
                cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
                cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
                cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
                cov_mat_between_neighbors += cov_mat_between_neighbors_j;
            }
        }//end loop over components j
        //Calculate matrices A and D as well as their derivatives
        //1. add first summand of matrix D (ZCZ^T_{ii})
        for (int j = 0; j < num_gp_total_; ++j) {
            double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
            if (j > 0) {//random coefficient
                d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
            }
            Dp.coeffRef(i, i) += d_comp_j;
        }
        //2. remaining terms
        cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
        den_mat_t A_i(1, num_nn);//dim = 1 x nn
        A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
        for (int inn = 0; inn < num_nn; ++inn) {
            if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
                Bpo.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
            }
            else {
                Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli) -= A_i(0, inn);
            }
        }
        Dp.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
    }//end loop over data i
    mean_pred_id = -Bpo * y_[cluster_i];
    if (!CondObsOnly) {
        // Solve the lower-triangular system Bp * x = -Bpo * y in place
        sp_L_solve(Bp.valuePtr(), Bp.innerIndexPtr(), Bp.outerIndexPtr(), num_data_pred_cli, mean_pred_id.data());
    }
    if (predict_cov_mat) {
        if (CondObsOnly) {
            cov_mat_pred_id = Dp;
        }
        else {
            sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
            Identity.setIdentity();
            sp_mat_t Bp_inv;
            eigen_sp_Lower_sp_RHS_cs_solve(Bp, Identity, Bp_inv, true);
            cov_mat_pred_id = T_mat(Bp_inv * Dp * Bp_inv.transpose());
        }
    }
}//end CalcPredVecchiaObservedFirstOrder

/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable proces when prediction locations appear first in the ordering * \param cluster_i Cluster index for which prediction are made * \param num_data_pred Total number of prediction locations (over all clusters) * \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization * \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster * \param gp_coords_mat_obs Coordinates for observed locations * \param gp_coords_mat_pred Coordinates for prediction locations * \param gp_rand_coef_data_pred Random coefficient data for GPs * \param predict_cov_mat If true, the covariance matrix is also calculated * \param[out] mean_pred_id Predicted mean * \param[out] cov_mat_pred_id Predicted covariance matrix */ void CalcPredVecchiaPredictedFirstOrder(data_size_t cluster_i, int num_data_pred, std::map<data_size_t, int>& num_data_per_cluster_pred, std::map<data_size_t, std::vector<int>>& data_indices_per_cluster_pred, const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred, bool predict_cov_mat, vec_t& mean_pred_id, T_mat& cov_mat_pred_id) { int num_data_cli = num_data_per_cluster_[cluster_i]; int num_data_pred_cli = num_data_per_cluster_pred[cluster_i]; int num_data_tot = num_data_cli + num_data_pred_cli; //Find nearest neighbors den_mat_t coords_all(num_data_tot, dim_gp_coords_); coords_all << gp_coords_mat_pred, gp_coords_mat_obs; std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_tot); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_tot); std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_tot); find_nearest_neighbors_Veccia_fast(coords_all, num_data_tot, 
num_neighbors_pred_, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1); //Prepare data for random coefficients std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_tot); if (num_gp_rand_coef_ > 0) { for (int j = 0; j < num_gp_rand_coef_; ++j) { std::vector<double> rand_coef_data(num_data_tot);//First entries are the predicted data, then the observed data #pragma omp for schedule(static) for (int i = 0; i < num_data_pred_cli; ++i) { rand_coef_data[i] = gp_rand_coef_data_pred[j * num_data_pred + data_indices_per_cluster_pred[cluster_i][i]]; } #pragma omp for schedule(static) for (int i = 0; i < num_data_cli; ++i) { rand_coef_data[num_data_pred_cli + i] = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_[i]; } #pragma omp for schedule(static) for (int i = 0; i < num_data_tot; ++i) { if (j == 0) { z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_); } int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1; vec_t coef_vec(dim_z); coef_vec(0) = rand_coef_data[i]; if (i > 0) { for (int ii = 1; ii < dim_z; ++ii) { coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]]; } } z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose(); } } } // Determine Triplet for initializing Bo, Bop, and Bp std::vector<Triplet_t> entries_init_Bo, entries_init_Bop, entries_init_Bp; for (int i = 0; i < num_data_pred_cli; ++i) { entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) { entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.)); } } for (int i = 0; i < num_data_cli; ++i) { entries_init_Bo.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i + num_data_pred_cli].size(); ++inn) { if (nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] < 
num_data_pred_cli) {//nearest neighbor belongs to predicted data entries_init_Bop.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn], 0.)); } else {//nearest neighbor belongs to predicted data entries_init_Bo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] - num_data_pred_cli, 0.)); } } } sp_mat_t Bo(num_data_cli, num_data_cli); sp_mat_t Bop(num_data_cli, num_data_pred_cli); sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli); Bo.setFromTriplets(entries_init_Bo.begin(), entries_init_Bo.end());//initialize matrices (in order that the code below can be run in parallel) Bop.setFromTriplets(entries_init_Bop.begin(), entries_init_Bop.end()); Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end()); sp_mat_t Do_inv(num_data_cli, num_data_cli); sp_mat_t Dp_inv(num_data_pred_cli, num_data_pred_cli); Do_inv.setIdentity();//Put 1 on the diagonal (for nugget effect) Dp_inv.setIdentity(); #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_tot; ++i) { int num_nn = (int)nearest_neighbors_cluster_i[i].size(); //define covariance and gradient matrices den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below if (i > 0) { for (int j = 0; j < num_gp_total_; ++j) { if (j == 0) { re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false); } else {//random coefficient GPs den_mat_t cov_mat_obs_neighbors_j; den_mat_t 
cov_mat_between_neighbors_j;
                    re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
                        cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
                    re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
                        cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
                    //multiply by coefficient matrix
                    cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
                    cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
                    cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
                    cov_mat_between_neighbors += cov_mat_between_neighbors_j;
                }
            }//end loop over components j
        }
        //Calculate matrices A and D as well as their derivatives
        //1. add first summand of matrix D (ZCZ^T_{ii})
        for (int j = 0; j < num_gp_total_; ++j) {
            double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
            if (j > 0) {//random coefficient
                d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
            }
            if (i < num_data_pred_cli) {
                Dp_inv.coeffRef(i, i) += d_comp_j;
            }
            else {
                Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) += d_comp_j;
            }
        }
        //2. remaining terms
        if (i > 0) {
            cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
            den_mat_t A_i(1, num_nn);//dim = 1 x nn
            A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
            for (int inn = 0; inn < num_nn; ++inn) {
                if (i < num_data_pred_cli) {
                    Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
                }
                else {
                    if (nearest_neighbors_cluster_i[i][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
                        Bop.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
                    }
                    else {
                        Bo.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn] - num_data_pred_cli) -= A_i(0, inn);
                    }
                }
            }
            if (i < num_data_pred_cli) {
                Dp_inv.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
            }
            else {
                Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
            }
        }
        // Invert the diagonal entries: from here on Dp_inv / Do_inv hold the INVERSE conditional variances
        if (i < num_data_pred_cli) {
            Dp_inv.coeffRef(i, i) = 1 / Dp_inv.coeffRef(i, i);
        }
        else {
            Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) = 1 / Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli);
        }
    }//end loop over data i
    sp_mat_t cond_prec = Bp.transpose() * Dp_inv * Bp + Bop.transpose() * Do_inv * Bop;// conditional precision matrix of the predicted locations
    chol_sp_mat_t CholFact;
    CholFact.compute(cond_prec);
    if (predict_cov_mat) {
        sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
        Identity.setIdentity();
        // NOTE(review): matrixL() is used without the factorization's permutation; correct if chol_sp_mat_t uses natural ordering - confirm
        sp_mat_t cond_prec_chol = CholFact.matrixL();
        sp_mat_t cond_prec_chol_inv;
        eigen_sp_Lower_sp_RHS_cs_solve(cond_prec_chol, Identity, cond_prec_chol_inv, true);
        cov_mat_pred_id = T_mat(cond_prec_chol_inv.transpose() * cond_prec_chol_inv);
        mean_pred_id = -cov_mat_pred_id * Bop.transpose() * Do_inv * Bo * y_[cluster_i];
    }
    else {
        mean_pred_id = -CholFact.solve(Bop.transpose() * Do_inv * Bo * y_[cluster_i]);
    }
}//end CalcPredVecchiaPredictedFirstOrder

/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia
*        approximation for the *latent* process when the observed locations appear first
*        in the ordering. Duplicate coordinates are handled through incidence matrices
*        Z_o (observed) and Z_p (predicted), and the observation noise is folded in via
*        the Woodbury identity.
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which predictions are made
* \param num_data_per_cluster_pred Keys: labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param gp_coords_mat_obs Coordinates of observed locations
* \param gp_coords_mat_pred Coordinates of prediction locations
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaLatentObservedFirstOrder(bool CondObsOnly,
    data_size_t cluster_i,
    std::map<data_size_t, int>& num_data_per_cluster_pred,
    const den_mat_t& gp_coords_mat_obs,
    const den_mat_t& gp_coords_mat_pred,
    bool predict_cov_mat,
    vec_t& mean_pred_id,
    T_mat& cov_mat_pred_id) {
    if (num_gp_rand_coef_ > 0) {
        Log::REFatal("The Vecchia approximation for latent process(es) is currently not implemented when having random coefficients");
    }
    int num_data_cli = num_data_per_cluster_[cluster_i];
    int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
    int num_data_tot = num_data_cli + num_data_pred_cli;
    // Stack observed coordinates on top of prediction coordinates
    den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
    coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
    // Determine number of unique observation locations
    std::vector<int> uniques;     // unique points
    std::vector<int> unique_idx;  // used for constructing incidence matrices if there are duplicates
    DetermineUniqueDuplicateCoords(gp_coords_mat_obs, num_data_cli, uniques, unique_idx);
    int num_coord_unique_obs = (int)uniques.size();
    // Determine unique locations (observed and predicted together); the second call
    // overwrites 'uniques'/'unique_idx' with the joint result
    DetermineUniqueDuplicateCoords(coords_all, num_data_tot, uniques, unique_idx);
    int num_coord_unique = (int)uniques.size();
    den_mat_t coords_all_unique;
    if ((int)uniques.size() == num_data_tot) {
        // No multiple observations at the same locations -> no incidence matrix needed
        coords_all_unique = coords_all;
    }
    else {
        coords_all_unique = coords_all(uniques, Eigen::all);
    }
    // Determine incidence matrices mapping data points to unique locations
    sp_mat_t Z_o = sp_mat_t(num_data_cli, uniques.size());
    sp_mat_t Z_p = sp_mat_t(num_data_pred_cli, uniques.size());
    for (int i = 0; i < num_data_tot; ++i) {
        if (i < num_data_cli) {
            Z_o.insert(i, unique_idx[i]) = 1.;
        }
        else {
            Z_p.insert(i - num_data_cli, unique_idx[i]) = 1.;
        }
    }
    std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_coord_unique);
    std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_coord_unique);
    std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_coord_unique);
    if (CondObsOnly) {
        // Find neighbors among the observed locations only (indices < num_coord_unique_obs)
        find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
            nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, num_coord_unique_obs - 1);
    }
    else {
        // Find neighbors among both the observed and prediction locations
        find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
            nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
    }
    // Determine triplets for initializing B (in order that the per-row fill below can run in parallel)
    std::vector<Triplet_t> entries_init_B;
    for (int i = 0; i < num_coord_unique; ++i) {
        entries_init_B.push_back(Triplet_t(i, i, 1.));  // put 1 on the diagonal
        for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
            entries_init_B.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
        }
    }
    sp_mat_t B(num_coord_unique, num_coord_unique);
    B.setFromTriplets(entries_init_B.begin(), entries_init_B.end());
    sp_mat_t D(num_coord_unique, num_coord_unique);
    D.setIdentity();
    D.diagonal().array() = 0.;
#pragma omp parallel for schedule(static)
    for (int i = 0; i < num_coord_unique; ++i) {
        int num_nn = (int)nearest_neighbors_cluster_i[i].size();
        // Covariance matrices between point i and its neighbors / among the neighbors
        den_mat_t cov_mat_obs_neighbors(1, num_nn);              // dim = 1 x nn
        den_mat_t cov_mat_between_neighbors(num_nn, num_nn);     // dim = nn x nn
        den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors;  // not used, just mock arguments for the calls below
        if (i > 0) {
            // Write on matrices directly for first GP component
            re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
                cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
            re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
                cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
        }
        // Calculate matrices A and D:
        // 1. first summand of D (marginal variance of the GP at location i)
        D.coeffRef(i, i) = re_comps_[cluster_i][ind_intercept_gp_]->cov_pars_[0];
        // 2. remaining terms (conditioning on the nearest neighbors)
        if (i > 0) {
            den_mat_t A_i(1, num_nn);  // dim = 1 x nn
            A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
            for (int inn = 0; inn < num_nn; ++inn) {
                B.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
            }
            D.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
        }
    }//end loop over data i
    // Calculate D_inv and B_inv in order to calculate Sigma and Sigma^-1
    sp_mat_t D_inv(num_coord_unique, num_coord_unique);
    D_inv.setIdentity();
    D_inv.diagonal().array() = D.diagonal().array().pow(-1);
    sp_mat_t Identity_all(num_coord_unique, num_coord_unique);
    Identity_all.setIdentity();
    sp_mat_t B_inv;
    eigen_sp_Lower_sp_RHS_cs_solve(B, Identity_all, B_inv, true);
    // Calculate inverse of covariance matrix for observed data using the Woodbury identity
    sp_mat_t Z_o_T = Z_o.transpose();
    sp_mat_t M_aux_Woodbury = B.transpose() * D_inv * B + Z_o_T * Z_o;
    chol_sp_mat_t CholFac_M_aux_Woodbury;
    CholFac_M_aux_Woodbury.compute(M_aux_Woodbury);
    if (predict_cov_mat) {
        // Using Eigen's solver
        sp_mat_t M_aux_Woodbury2 = CholFac_M_aux_Woodbury.solve(Z_o_T);
        sp_mat_t Identity_obs(num_data_cli, num_data_cli);
        Identity_obs.setIdentity();
        sp_mat_t ZoSigmaZoT_plusI_Inv = -Z_o * M_aux_Woodbury2 + Identity_obs;
        sp_mat_t ZpSigmaZoT = Z_p * B_inv * D * B_inv.transpose() * Z_o_T;
        sp_mat_t M_aux = ZpSigmaZoT * ZoSigmaZoT_plusI_Inv;
        mean_pred_id = M_aux * y_[cluster_i];
        sp_mat_t Identity_pred(num_data_pred_cli, num_data_pred_cli);
        Identity_pred.setIdentity();
        cov_mat_pred_id = T_mat(Z_p * B_inv * D * B_inv.transpose() * Z_p.transpose() + Identity_pred - M_aux * ZpSigmaZoT.transpose());
    }
    else {
        // Mean only: two triangular solves via the Woodbury factorization instead of forming inverses
        vec_t resp_aux = Z_o_T * y_[cluster_i];
        vec_t resp_aux2 = CholFac_M_aux_Woodbury.solve(resp_aux);
        resp_aux = y_[cluster_i] - Z_o * resp_aux2;
        mean_pred_id = Z_p * B_inv * D * B_inv.transpose() * Z_o_T * resp_aux;
    }
}//end CalcPredVecchiaLatentObservedFirstOrder
friend class REModel;
};
}  // namespace GPBoost
#endif   // GPB_RE_MODEL_TEMPLATE_H_
dynamic.c
#include <stdio.h>
#include <omp.h>

/*
 * Demo: enable dynamic adjustment of the number of threads and report the
 * dynamic-threads flag twice -- once from the initial (serial) thread and
 * once from the master thread inside a parallel region.
 */
int main() {
    /* Any nonzero argument turns dynamic thread adjustment on. */
    omp_set_dynamic(9);
    omp_set_num_threads(4);

    int dyn_before = omp_get_dynamic();
    printf("%d\n", dyn_before);

#pragma omp parallel
#pragma omp master
    {
        int dyn_inside = omp_get_dynamic();
        printf("%d\n", dyn_inside);
    }
}
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(8*t2-Nz-60,64)),ceild(4*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t3+Nx,64),floord(Nt+Nx-4,64)),floord(4*t1+Nx+5,64)),floord(8*t2+Nx+4,64)),floord(8*t1-8*t2+Nz+Nx+3,64));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),64*t4+62),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); 
ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
O2VertIntegration.c
#include <mpi.h>
#include "grid.h"

/* Externally-defined grid variables (generic tagged-union field layout):
 * gv_temp holds a 3D field (block x height x cell), gv_vi a 2D field
 * (block x cell). NOTE(review): the repeated anonymous-struct declarations
 * and the heavily parenthesized index arithmetic below look machine-
 * generated -- confirm against the generator before hand-editing. */
extern struct {
    char *name;
    int loc;
    int dim;
    union {
        GVAL *restrict * restrict p2;
        GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_temp;

extern struct {
    char *name;
    int loc;
    int dim;
    union {
        GVAL *restrict * restrict p2;
        GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_vi;

/* Second-order vertical integration: for every cell of every block owned by
 * this MPI rank, accumulate the 3D field gv_temp over all height levels into
 * the 2D field gv_vi (gv_vi[b][c] += sum over h of gv_temp[b][h][c]). */
void O2VertIntegration(GRID * g)
{
    {
        /* The repeated subexpression ((cBlkCnt + world_size - 1) / world_size)
         * is the blocks-per-rank chunk size (ceiling division). Blocks are
         * distributed in contiguous chunks; block_index below is the LOCAL
         * block index on this rank.
         * min_block: offset of the first local block -- since the global start
         * block is 0, both branches evaluate to 0 here. */
        size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
        /* max_block: number of blocks owned by this rank -- 0 if the rank is
         * outside the range of ranks holding blocks, the (possibly partial)
         * remainder chunk on the last holding rank, else a full chunk. */
        size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
        /* Parallel over blocks; each (block, cell) output element is touched
         * by exactly one thread, so no reduction clause is needed. */
#pragma omp parallel for
        for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
            for (size_t height_index = (0); height_index < (g->height); height_index++) {
                for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
                    gv_vi->data_pointer.p2[(block_index)][(cell_index)] += gv_temp->data_pointer.p3[(block_index)][(height_index)][(cell_index)];
                }
            }
        }
    }
}
rawMD5_fmt_plug.c
/*
 * Raw-MD5 (thick) based on Raw-MD4 w/ mmx/sse/intrinsics
 * This software is Copyright (c) 2011 magnum, and it is hereby released to the
 * general public under the following terms: Redistribution and use in source
 * and binary forms, with or without modification, are permitted.
 *
 * OMP added May 2013, JimF
 */

/* Standard John-the-Ripper "plug" stanza: the same file is included three
 * ways (extern declaration, registration, full implementation). */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawMD5);
#else

#include <string.h>

#include "arch.h"
#include "md5.h"
#include "misc.h"	// error()
#include "common.h"
#include "johnswap.h"
#include "formats.h"
#include "base64_convert.h"

#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

//#undef SIMD_COEF_32
//#undef SIMD_PARA_MD5

/*
 * Only effective for SIMD.
 * Undef to disable reversing steps for benchmarking.
 */
#define REVERSE_STEPS

#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 256 // core i7
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif

#include "simd-intrinsics.h"
#include "memdbg.h"

#define FORMAT_LABEL "Raw-MD5"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifndef MD5_BUF_SIZ
#define MD5_BUF_SIZ 16
#endif
#define CIPHERTEXT_LENGTH 32
#define DIGEST_SIZE 16
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define FORMAT_TAG "$dynamic_0$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)

/* Self-test vectors: hex digests (with and without tag) and their plaintexts. */
static struct fmt_tests tests[] = {
	{"5a105e8b9d40e1329780d62ea2265d8a", "test1"},
	{FORMAT_TAG "5a105e8b9d40e1329780d62ea2265d8a", "test1"},
	{"098f6bcd4621d373cade4e832627b4f6", "test"},
	{FORMAT_TAG "378e2c4a07968da2eca692320136433d", "thatsworking"},
	{FORMAT_TAG "8ad8757baa8564dc136c1e07507f4a98", "test3"},
	{"d41d8cd98f00b204e9800998ecf8427e", ""},
#ifdef DEBUG
	{FORMAT_TAG "c9ccf168914a1bcfc3229f1948e67da0","1234567890123456789012345678901234567890123456789012345"},
#if PLAINTEXT_LENGTH >= 80
	{FORMAT_TAG "57edf4a22be3c955ac49da2e2107b67a","12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
#endif
#endif
	{"{MD5}CY9rzUYh03PK3k6DJie09g==", "test"},
	{NULL}
};

#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 55
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
/* Byte offset of character i of key 'index' inside the interleaved SIMD key buffer. */
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#ifdef SIMD_COEF_32
static ARCH_WORD_32 (*saved_key)[MD5_BUF_SIZ*NBKEYS];
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[4];
#endif

/* Scale the key batch for OpenMP and allocate the key/digest buffers
 * (SIMD-aligned in the SIMD build). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#else
	self->params.max_keys_per_crypt *= 10;
#endif
#ifndef SIMD_COEF_32
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
#else
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*crypt_key), MEM_ALIGN_SIMD);
#endif
}

/* Release everything allocated by init(). */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
}

/* Convert {MD5}CY9rzUYh03PK3k6DJie09g== to 098f6bcd4621d373cade4e832627b4f6 */
static char *prepare(char *fields[10], struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	if (!strncmp(fields[1], "{MD5}", 5) && strlen(fields[1]) == 29) {
		int res;

		res = base64_convert(&fields[1][5], e_b64_mime, 24, out, e_b64_hex, sizeof(out), flg_Base64_HEX_LOCASE);
		if (res >= 0)
			return out;
	}
	return fields[1];
}

/* Accept exactly 32 hex digits, with or without the $dynamic_0$ tag. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (*p == '$' && !strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = p;
	while (atoi16l[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q && q - p == CIPHERTEXT_LENGTH;
}

/* Canonicalize: always return the tagged form of the hash. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG;

	if (ciphertext[0] == '$' && !strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return ciphertext;
	memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH);
	return out;
}

/* Decode the 32 hex digits into 4 little-endian 32-bit words; in the SIMD
 * build the final MD5 steps are "reversed" out of the stored binary. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned long dummy;
		unsigned int i[DIGEST_SIZE/sizeof(unsigned int)];
	} _out;
	unsigned int *out = _out.i;
	unsigned int i;
	unsigned int temp;

	ciphertext += TAG_LENGTH;
	for (i=0; i<4; i++) {
		/* hex pairs are byte-swapped into a little-endian word */
		temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])]));
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24;
#if ARCH_LITTLE_ENDIAN
		out[i]=temp;
#else
		out[i]=JOHNSWAP(temp);
#endif
	}
#if SIMD_COEF_32 && defined(REVERSE_STEPS)
	md5_reverse(out);
#endif
	return out;
}

/* Inverse of get_binary(): render the stored binary back as a tagged hex string. */
static char *source(char *source, void *binary)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG;
	ARCH_WORD_32 b[4];
	char *p;
	int i, j;

	memcpy(b, binary, sizeof(b));
#if SIMD_COEF_32 && defined(REVERSE_STEPS)
	md5_unreverse(b);
#endif
#if ARCH_LITTLE_ENDIAN==0
	alter_endianity(b, 16);
#endif
	p = &out[TAG_LENGTH];
	for (i = 0; i < 4; i++)
		for (j = 0; j < 8; j++)
			*p++ = itoa16[(b[i] >> ((j ^ 1) * 4)) & 0xf];
	return out;
}

#ifdef SIMD_COEF_32
/* Copy the key into the interleaved SIMD buffer one 32-bit word at a time,
 * append the 0x80 MD5 padding byte, zero the rest of the old key, and store
 * the bit length in the MD5 length word (word 14). */
static void set_key(char *_key, int index)
{
#if ARCH_ALLOWS_UNALIGNED
	const ARCH_WORD_32 *key = (ARCH_WORD_32*)_key;
#else
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
	const ARCH_WORD_32 *key = (uint32_t*)(is_aligned(_key, sizeof(uint32_t)) ?
	                                      _key : strcpy(buf_aligned, _key));
#endif
	ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*SIMD_COEF_32];
	ARCH_WORD_32 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_32 temp;

	len = 0;
	while((temp = *key++) & 0xff) {
		if (!(temp & 0xff00))
		{
			*keybuf_word = (temp & 0xff) | (0x80 << 8);
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			*keybuf_word = (temp & 0xffff) | (0x80 << 16);
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000))
		{
			*keybuf_word = temp | (0x80U << 24);
			len+=3;
			goto key_cleaning;
		}
		*keybuf_word = temp;
		len += 4;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = 0x80;

#ifdef DEBUG
	/* This function is higly optimized and assumes that we are never
	 * ever given a key longer than fmt_params.plaintext_length.
	 * If we are, buffer overflows WILL happen */
	if (len > PLAINTEXT_LENGTH) {
		fprintf(stderr, "\n** Core bug: got len %u\n'%s'\n", len, _key);
		error();
	}
#endif

key_cleaning:
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	keybuffer[14*SIMD_COEF_32] = len << 3;
}
#else
/* Scalar build: just remember the key and its length. */
static void set_key(char *key, int index)
{
	int len = strlen(key);

	saved_len[index] = len;
	memcpy(saved_key[index], key, len);
}
#endif

#ifdef SIMD_COEF_32
/* Reassemble the key from the interleaved SIMD buffer (length is recovered
 * from the stored bit length in word 14). */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	unsigned int i;
	ARCH_WORD_32 len = ((ARCH_WORD_32*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*SIMD_COEF_32] >> 3;

	for(i=0;i<len;i++)
		out[i] = ((char*)saved_key)[GETPOS(i, index)];
	out[i] = 0;
	return (char*)out;
}
#else
static char *get_key(int index)
{
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
}
#endif

#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif

/* Hash all queued keys; one SIMD batch (or one key) per loop iteration,
 * parallelized across OpenMP threads. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < loops; index++) {
#if SIMD_COEF_32
		SIMDmd5body(saved_key[index], crypt_key[index], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
		MD5_CTX ctx;

		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], saved_len[index]);
		MD5_Final((unsigned char *)crypt_key[index], &ctx);
#endif
	}
	return count;
}

/* Quick check: does ANY computed digest match the first word of 'binary'? */
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
	unsigned int x, y;
#if 1
	const unsigned int c = (count + SIMD_COEF_32 - 1) / SIMD_COEF_32;
#else
	const unsigned int c = SIMD_PARA_MD5;
#endif
	for(y = 0; y < c; y++)
		for(x = 0; x < SIMD_COEF_32; x++) {
			if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+x] )
				return 1;
		}
	return 0;
#else
	unsigned int index = 0;

#if 1
	for (index = 0; index < count; index++)
#endif
		if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
			return 1;
	return 0;
#endif
}

/* Check one specific candidate (first word only in the SIMD build). */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int x = index&(SIMD_COEF_32-1);
	unsigned int y = (unsigned int)index/SIMD_COEF_32;

	return ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4];
#else
	return !memcmp(binary, crypt_key[index], DIGEST_SIZE);
#endif
}

/* Full-digest confirmation: in the SIMD build, recompute a complete MD5
 * (the stored digest only has reversed/partial words); scalar build already
 * compared all 16 bytes in cmp_one. */
static int cmp_exact(char *source, int index)
{
#ifdef SIMD_COEF_32
	ARCH_WORD_32 crypt_key[DIGEST_SIZE / 4];
	MD5_CTX ctx;
	char *key = get_key(index);

	MD5_Init(&ctx);
	MD5_Update(&ctx, key, strlen(key));
	MD5_Final((void*)crypt_key, &ctx);

#ifdef REVERSE_STEPS
	md5_reverse(crypt_key);
#endif
	return !memcmp(get_binary(source), crypt_key, DIGEST_SIZE);
#else
	return 1;
#endif
}

/* Partial-hash accessors used by the cracker's hash tables. */
#ifdef SIMD_COEF_32
#define SIMD_INDEX (index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif

/* Format descriptor registered with the John core. */
struct fmt_main fmt_rawMD5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
zpbtrf.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_pbtrf
 *
 *  Performs the Cholesky factorization of a Hermitian positive definite
 *  band matrix A,
 *
 *    \f[ A = L \times L^H \f] or \f[ A = U^H \times U \f]
 *
 *  if uplo = lower or upper, respectively, where L is lower triangular with
 *  positive diagonal elements, and U is upper triangular.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0.
 *
 * @param[in] kd
 *          The number of superdiagonals within the band of A if uplo=upper,
 *          or the number of subdiagonals if uplo=lower. kd >= 0.
 *
 * @param[in,out] AB
 *          On entry, the upper or lower triangle of the Hermitian band
 *          matrix A, stored in the first kd+1 rows of the array. The
 *          j-th column of A is stored in the j-th column of the array AB
 *          as follows:
 *          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd) <= i <= j;
 *          if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j <= i <= min(n,j+kd).
 *          \n
 *          On exit, if INFO = 0, the triangular factor U or L from the
 *          Cholesky factorization A = U^H*U or A = L*L^H of the band
 *          matrix A, in the same storage format as A.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB. ldab >= kd+1.
 *          (This is what the code below enforces; the general-band bound
 *          2*kl+ku+1 does not apply to the Hermitian/positive-definite case.)
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, the leading minor of order i of A is not
 *          positive definite, so the factorization could not
 *          be completed, and the solution has not been computed.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zpbtrf
 * @sa plasma_cpbtrf
 * @sa plasma_dpbtrf
 * @sa plasma_spbtrf
 *
 ******************************************************************************/
int plasma_zpbtrf(plasma_enum_t uplo, int n, int kd,
                  plasma_complex64_t *pAB, int ldab)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kd < 0) {
        plasma_error("illegal value of kd");
        return -3;
    }
    if (ldab < kd+1) {
        plasma_error("illegal value of ldab");
        return -5;
    }

    // quick return
    if (imax(n, 0) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
    // lm = number of rows needed to hold the band in tile layout.
    int lm = nb*(1+(kd+nb-1)/nb);
    plasma_desc_t AB;
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexDouble, uplo, nb, nb,
                                             lm, n, 0, 0, n, n, kd, kd, &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // NOTE(review): the AB descriptor created above is not destroyed on
        // this error path -- confirm whether this leak is intentional.
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zpb2desc(pAB, ldab, AB, sequence, &request);

        // Call the tile async function.
        plasma_omp_zpbtrf(uplo, AB, sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2pb(AB, pAB, ldab, sequence, &request);
    }
    // implicit synchronization

    // Free matrix A in tile layout.
    plasma_desc_destroy(&AB);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_pbtrf
 *
 *  Performs the Cholesky factorization of a Hermitian positive definite
 *  band matrix.
 *  Non-blocking tile version of plasma_zpbtrf().
 *  May return before the computation is finished.
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] AB
 *          Descriptor of matrix AB.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zpbtrf
 * @sa plasma_omp_zpbtrf
 * @sa plasma_omp_cpbtrf
 * @sa plasma_omp_dpbtrf
 * @sa plasma_omp_spbtrf
 *
 ******************************************************************************/
void plasma_omp_zpbtrf(plasma_enum_t uplo, plasma_desc_t AB,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        // NOTE(review): plasma_request_fail is invoked with sequence == NULL
        // here -- verify it tolerates a NULL sequence before relying on this.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (AB.m == 0)
        return;

    // Call the parallel function.
    plasma_pzpbtrf(uplo, AB, sequence, request);
}
geometric_transform.c
#include <stdlib.h> #include <string.h> #include "private/core_private.h" #include "private/imcore_private.h" #include "private/lacore_private.h" return_t imcrop(matrix_t *in, struct rectangle_t crop_region, matrix_t *out) { int cond1 = is_image(in) & is_image(out); check_condition(cond1, ERROR_NOT_IMAGE, "input and output must be an image"); int cond2 = crop_region.width > 0 && crop_region.height > 0; check_condition(cond2, ERROR_DIMENSION_MISMATCH, "width and height of the crop region must be positive"); // compute the start and end of the crop region int32_t x1 = 0, xs = minimum(crop_region.x, width(in) - 1), xe = minimum(xs + crop_region.width - 1, width(in) - 1); // if the crop region starts with a negative number, recompute the values if (crop_region.x < 0) { x1 = minimum(-crop_region.x, width(in) - 1); xs = 0; xe = minimum(crop_region.x + crop_region.width - 1, width(in) - 1); } // compute the start and end of the crop region int32_t y1 = 0, ys = minimum(crop_region.y, height(in) - 1), ye = minimum(ys + crop_region.height - 1, height(in) - 1); // if the crop region starts with a negative number, recompute the values if (crop_region.y < 0) { y1 = minimum(-crop_region.y, height(in) - 1); ys = 0; ye = minimum(crop_region.y + crop_region.height - 1, height(in) - 1); } // allocate out before use it return_t returnValue = matrix_resize(out, crop_region.height, crop_region.width, channels(in)); check_return(returnValue, returnValue); // if we are on the limits, set the out to zero if (xs == 0 || ys == 0 || xe == width(in) -1 || ye == height(in) - 1) { memset(mdata(out,0), 0, volume(out) * elemsize(out)); } uint32_t h; uint32_t blockSize = (xe - xs + 1) * channels(in) * elemsize(in); uint32_t outputStep = channels(out) * width(out) * elemsize(in); uint32_t inputStep = channels(in) * width(in) * elemsize(in); // TODO: create in_data pointer based on the input type uint8_t *in_data = data(uint8_t, in, ys, xs, 0); uint8_t *out_data = data(uint8_t, out, y1, x1, 0); for 
(h = 0; h < (ye - ys + 1); h++, out_data += outputStep, in_data += inputStep) { memcpy(out_data, in_data, blockSize); } return returnValue; } return_t imresize(matrix_t *in, uint32_t nwidth, uint32_t nheight, matrix_t *out) { int cond1 = is_image(in) & is_image(out); check_condition(cond1, ERROR_NOT_IMAGE, "input and output must be an image"); // allocate out before use it matrix_resize(out, nheight, nwidth, channels(in)); int w,h,d; double scalex = width(in) / (double)nwidth; double scaley = height(in) / (double)nheight; // TODO: create in_data pointer based on the input type uint8_t *in_data = data(uint8_t, in); uint8_t *out_data = data(uint8_t, out); int shidx, hidx, swidx, widx; #pragma omp parallel for private(shidx, hidx, widx, swidx, h, w, d) for(h=0; h < nheight; h++) { shidx = h*nwidth; hidx = ((int)(h*scaley))*width(in); for(w=0; w < nwidth; w++) { widx = channels(in)*( ((int)(w*scalex)) +hidx); swidx = channels(in)*(w+shidx); for(d=0; d < channels(in); d++) { out_data[swidx + d] = in_data[widx + d]; } } } //done return SUCCESS; } /* Scale2x algorithm written from the wikipedia reference A --\ 1 2 C P B --/ 3 4 D 1=P; 2=P; 3=P; 4=P; IF C==A AND C!=D AND A!=B => 1=A IF A==B AND A!=C AND B!=D => 2=B IF D==C AND D!=B AND C!=A => 3=C IF B==D AND B!=A AND D!=C => 4=D */ return_t imscale2x(matrix_t *in, matrix_t *out) { int cond1 = is_image(in) & is_image(out); check_condition(cond1, ERROR_NOT_IMAGE, "input and output must be an image"); uint32_t nwidth = 2*width(in); uint32_t nheight = 2*height(in); // allocate out before use it matrix_resize(out, nheight, nwidth, channels(in)); // TODO: create in_data pointer based on the input type uint8_t *in_data = data(uint8_t, in); uint8_t *out_data = data(uint8_t, out); int w,h,d; int h0, hW, sh01, shW1, sh02, shW2; // copy borders directly for(h=0; h < height(in); h++) { h0 = channels(in)*(h*width(in) + 0); hW = channels(in)*(h*width(in) + width(in)-1); sh01 = channels(out)*((h+0)*width(out) + 0); sh02 = 
channels(out)*((h+1)*width(out) + 0); shW1 = channels(out)*((h+0)*width(out) + width(out)-1); shW2 = channels(out)*((h+1)*width(out) + width(out)-1); for(d=0; d < channels(in); d++) { // first coloumn of the image out_data[sh01 + d] = in_data[h0 + d]; out_data[sh02 + d] = in_data[h0 + d]; // last coloumn of the image out_data[shW1 + d] = in_data[hW + d]; out_data[shW2 + d] = in_data[hW + d]; } } int w0, wH, sw01, swH1, sw02, swH2; for(w=0; w < width(in); w++) { w0 = channels(in)*(0*width(in) + w); wH = channels(in)*((height(in)-1)*width(in) + w); sw01 = channels(out)*(0*width(in) + w + 0); sw02 = channels(out)*(0*width(in) + w + 1); swH1 = channels(out)*((height(out)-1)*width(out) + w + 0); swH2 = channels(out)*((height(out)-1)*width(out) + w + 1); for(d=0; d < channels(in); d++) { // first coloumn of the image out_data[sw01 + d] = in_data[w0 + d]; out_data[sw02 + d] = in_data[w0 + d]; // last coloumn of the image out_data[swH1 + d] = in_data[wH + d]; out_data[swH2 + d] = in_data[wH + d]; } } //uint8_t A,B,C,D,E,F,G,H,I; uint8_t B,D,E,F,H; uint8_t E0,E1,E2,E3; for(h=1; h < height(in)-1; h++) { for(w=1; w < width(in)-1; w++) { for(d=0; d < channels(in); d++) { // get 8 neighbours of w,h,c pixel uint32_t in_idx = channels(in)*(h*width(in) + w) + d; //A = in_data[in_idx - channels(in)*(width(in)+1)]; B = in_data[in_idx - channels(in)*(width(in)+0)]; //C = in_data[in_idx - channels(in)*(width(in)-1)]; D = in_data[in_idx - channels(in)]; E = in_data[in_idx]; F = in_data[in_idx + channels(in)]; //G = in_data[in_idx + channels(in)*(width(in)-1)]; H = in_data[in_idx + channels(in)*(width(in)+0)]; //I = in_data[in_idx + channels(in)*(width(in)+1)]; if (B != H && D != F) { E0 = D == B ? D : E; E1 = B == F ? F : E; E2 = D == H ? D : E; E3 = H == F ? 
F : E; } else { E0 = E; E1 = E; E2 = E; E3 = E; } out_data[channels(out)*((2*h+0)*width(out) + 2*w+0) + d] = E0; out_data[channels(out)*((2*h+1)*width(out) + 2*w+0) + d] = E2; out_data[channels(out)*((2*h+0)*width(out) + 2*w+1) + d] = E1; out_data[channels(out)*((2*h+1)*width(out) + 2*w+1) + d] = E3; } } } //done return SUCCESS; } matrix_t *maketform(float data[9]) { // check that the inpu is inversible double det = data[0]*data[4]*data[8]; det += data[3]*data[7]*data[2]; det += data[6]*data[1]*data[5]; det -= data[0]*data[7]*data[5]; det -= data[6]*data[4]*data[2]; det -= data[3]*data[1]*data[8]; // first create a new empty matrix matrix_t *out = matrix_create(float); // TODO: define an epsilon here instead of static number check_condition(!equal(det, 0, 0.00001), out, "transform matrix is not valid!"); // do new copy for the output matrix_resize(out, 3,3,1); det = 1.0f / det; // first row at(float, out, 0) = det*( data[4]*data[8] - data[5]*data[7]); at(float, out, 1) = det*( data[2]*data[7] - data[1]*data[8]); at(float, out, 2) = det*( data[1]*data[5] - data[2]*data[4]); // second row at(float, out, 3) = det*( data[5]*data[6] - data[3]*data[8]); at(float, out, 4) = det*( data[0]*data[8] - data[2]*data[6]); at(float, out, 5) = det*( data[2]*data[3] - data[0]*data[5]); // third row at(float, out, 6) = det*( data[3]*data[7] - data[4]*data[6]); at(float, out, 7) = det*( data[1]*data[6] - data[0]*data[7]); at(float, out, 8) = det*( data[0]*data[4] - data[1]*data[3]); // done return out; } matrix_t *rot2tform(float cx, float cy, float theta, float scale) { // first create a new empty matrix matrix_t *out = matrix_create(float); // TODO: define an epsilon here instead of static number check_condition(!equal(scale, 0, 0.00001), out, "transform matrix is not valid!"); float alpha = scale*cosf(deg2rad(theta)); float beta = scale*sinf(deg2rad(theta)); float Tx = (1-alpha)*cx - beta*cy; float Ty = (beta*cx + (1-alpha)*cy); float s = 1/(scale*scale); // determinant // get 
space for the inverse matrix matrix_resize(out, 3,3,1); // Since the transformation matrix is known, we return inverse matrix float *out_data = data(float, out); out_data[0] = alpha*s; out_data[1] = -beta*s; out_data[2] = (Ty*beta - Tx*alpha)*s; out_data[3] = beta*s; out_data[4] = alpha*s; out_data[5] = -(Tx*beta + Ty*alpha)*s; out_data[6] = 0; out_data[7] = 0; out_data[8] = 1; // return the result return out; } matrix_t *pts2tform(struct point_t *src, struct point_t *dst, int pts_length) { // do new copy for the output matrix_t *inv = matrix_create(float); // chack that the inputs are correct int cond1 = pts_length == 3 || pts_length ==4; check_condition(cond1, inv, "provide 3 or 4 points to find an affine transform!"); // resize the matrix so that we can insert data in it matrix_resize(inv, 3,3,1); float *inv_data = data(float, inv); inv_data[8] = 1; // SOLVE 3 POINT AFFINE (SCALE,ROTATION,SHEAR,TRANSITION) COEFFICIENTS if(pts_length == 3) { // We should solve 6x6 matrix, since it is a block matrix, we can use simpler method float dst_data[] = {dst[0].x, dst[0].y, 1, dst[1].x, dst[1].y, 1, dst[2].x, dst[2].y, 1}; // get inverse of the created matrix matrix_t *invA = maketform(dst_data); float *invA_data = data(float, invA); inv_data[0] = src[0].x*invA_data[0]+src[1].x*invA_data[1]+src[2].x*invA_data[2]; inv_data[1] = src[0].x*invA_data[3]+src[1].x*invA_data[4]+src[2].x*invA_data[5]; inv_data[2] = src[0].x*invA_data[6]+src[1].x*invA_data[7]+src[2].x*invA_data[8]; inv_data[3] = src[0].y*invA_data[0]+src[1].y*invA_data[1]+src[2].y*invA_data[2]; inv_data[4] = src[0].y*invA_data[3]+src[1].y*invA_data[4]+src[2].y*invA_data[5]; inv_data[5] = src[0].y*invA_data[6]+src[1].y*invA_data[7]+src[2].y*invA_data[8]; inv_data[6] = 0; inv_data[7] = 0; //do something here! 
matrix_free(&invA); } else if(pts_length == 4) { float dst_data[] = { dst[0].x, dst[0].y, 1, 0, 0, 0, -src[0].x * dst[0].x, -src[0].x * dst[0].y, 0, 0, 0, dst[0].x, dst[0].y, 1, -src[0].y * dst[0].x, -src[0].y * dst[0].y, dst[1].x, dst[1].y, 1, 0, 0, 0, -src[1].x * dst[1].x, -src[1].x * dst[1].y, 0, 0, 0, dst[1].x, dst[1].y, 1, -src[1].y * dst[1].x, -src[1].y * dst[1].y, dst[2].x, dst[2].y, 1, 0, 0, 0, -src[2].x * dst[2].x, -src[2].x * dst[2].y, 0, 0, 0, dst[2].x, dst[2].y, 1, -src[2].y * dst[2].x, -src[2].y * dst[2].y, dst[3].x, dst[3].y, 1, 0, 0, 0, -src[3].x * dst[3].x, -src[3].x * dst[3].y, 0, 0, 0, dst[3].x, dst[3].y, 1, -src[3].y * dst[3].x, -src[3].y * dst[3].y, }; float b_data[] = {src[0].x, src[0].y, src[1].x, src[1].y, src[2].x, src[2].y, src[3].x, src[3].y}; matrix_t *inA = matrix_create(float, 8, 8, 1, dst_data); matrix_t *inB = matrix_create(float, 8, 1, 1, b_data); matrix_divide(inA, inB, inv); matrix_free(&inA); matrix_free(&inB); } return inv; } struct point_t apply_tform(struct point_t in, matrix_t *tform) { matrix_t *itform = maketform(data(float, tform)); float *tr_data = data(float, tform); struct point_t out = {0, 0}; double zp = in.x*tr_data[6] + in.y*tr_data[7] + tr_data[8]; // TODO: define an epsilon here instead of static number if(equal(zp, 0, 0.00001)) { return out; } out.x = (in.x*tr_data[0] + in.y*tr_data[1] + tr_data[2]) / zp; out.y = (in.x*tr_data[3] + in.y*tr_data[4] + tr_data[5]) / zp; matrix_free(&itform); return out; } return_t imtransform(matrix_t *in, matrix_t *tr, matrix_t *out) { int cond1 = is_image(in) & is_image(out); check_condition(cond1, ERROR_NOT_IMAGE, "input and output must be an image"); // get the size of the output, if it has a size than use it uint32_t nwidth = width(out); uint32_t nheight = height(out); // otherwise decide the size based on the input size if(nwidth==0 && nheight==0) { // TODO: auto find the meaningful image region and create it /* // auto find new width and height UL,UR,BL,BR corners of the input 
point_t pin[4] = {0,0, width(in),0, width(in),height(in), 0,height(in)}; point_t pout = apply_tform(pin[0], tr); int xmin = pout.x; int xmax = pout.x; int ymin = pout.y; int ymax = pout.y; int c = 0; for(c=1; c < 4; c++) { pout = apply_tform(pin[c], tr); //printf("%3.2f %3.2f\n", pout[c].x, pout[c].y); if(xmin > pout.x) { xmin = pout.x; } if(xmax < pout.x) { xmax = pout.x; } if(ymin > pout.y) { ymin = pout.y; } if(ymax < pout.y) { ymax = pout.y; } } // consider xmin|ymin < 0 nwidth = xmax-xmin; nheight = ymax-ymin; */ nwidth = width(in); nheight = height(in); } matrix_resize(out, nheight, nwidth, channels(in)); // TODO: create in_data pointer based on the input type uint8_t *in_data = data(uint8_t, in); uint8_t *out_data = data(uint8_t, out); int w,h; // Affine int shidx, x, y, d; float *tr_data = data(float, tr); //#pragma omp parallel for private(shidx, x, y, h, w, d) for(h=0; h < height(out); h++) { shidx = h*width(out); for(w=0; w < width(out); w++) { double zp = w*tr_data[6] + h*tr_data[7] + tr_data[8]; // TODO: define an epsilon here instead of static number if(equal(zp, 0, 0.00001)) { continue; } x = (w*tr_data[0] + h*tr_data[1] + tr_data[2]) / zp; y = (w*tr_data[3] + h*tr_data[4] + tr_data[5]) / zp; //printf("coor: %d --> %d\t %d --> %d\n", x,w, y,h); // check whether the x,y is outside the image if( (x < 0) || (y < 0) || (x > width(in)-1) || (y > height(in)-1) ) { continue; } for(d=0; d < channels(in); d++) { out_data[channels(out)*(w+shidx) + d] = in_data[ channels(in)*(x+width(in)*y) + d]; } } } return SUCCESS; }
omp_cgemm_batch.c
/** * @file omp_cgemm_batch.c * * @brief BBLAS gemm_batch float _Complex routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @generated from ./bblas_omp/omp_zgemm_batch.c normal z -> c, Mon Jun 6 09:44:14 2016 **/ #endif #include<cblas.h> #include "bblas_comp.h" #include "bblas.h" #include <omp.h> #define COMPLEX /** Purpose ------- <b>omp_cgemm_batch</b> is an OpenMP version of cgemm_batch. It performs the matrix-matrix operations arrayC[i] = alpha[i]*op( arrayA[i] )*op( arrayB[i] ) + beta[i]*arrayC[i], where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha[i] and beta[i] are scalars, and arrayA[i], arrayB[i] and C are matrices, with op( arrayA[i] ) an m by k matrix, op( arrayB[i] ) a k by n matrix and arrayC[i] an m by n matrix. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) need only to have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of transA[0], transB[0], M[0], N[0], K[0], alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations. Parameters ---------- @param[in] transA Array of <tt>enum BBLAS_TRANS</tt>. On entry, transA[i] specifies the form of op( arrayA[i] ) to be used in the matrix multiplication as follows: - = BblasNoTrans: op( arrayA[i] ) = arrayA[i]. 
- = BblasTrans: op( arrayA[i] ) = arrayA[i]**T. - = BblasConjTrans: op( arrayA[i] ) = arrayA[i]**H. @param[in] transB Array of <tt>enum BBLAS_TRANS</tt>. On entry, transB[i] specifies the form of op( arrayB[i] ) to be used in the matrix multiplication as follows: - = BblasNoTrans: op( arrayB[i] ) = arrayB[i]. - = BblasTrans: op( arrayB[i] ) = arrayB[i]**T. - = BblasConjTrans: op( arrayB[i] ) = arrayB[i]**H. @param[in] M Array of <tt>int</tt>. Each element M[i] specifies the number of rows of the matrix op( arrayA[i] ) and of the matrix arrayC[i]. M[i] must be greater than zero. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of columns of the matrix op( arrayB[i] ) and the number of columns of the matrix arrayC[i]. N[i] must be greater than zero. @param[in] K Array of <tt>int</tt>. Each element K[i] specifies the number of columns of the matrix op( arrayA[i] ) and the number of rows of the matrix op( arrayB[i] ). K[i] must be greater than zero. @param[in] alpha Array of <tt>complex_16</tt>. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a COMPLEX matrix of dimension lda[i] by Ka[i], where Ka[i] is K[i] when transA[i] = BblasNoTrans, and is M[i] otherwise. When using transA[i] = BblasNoTrans the leading M[i] by K[i] part of arrayA[i] must contain the matrix elements, otherwise the leading K[i] by M[i] part of arrayA[i] must contain the matrix elements. @param[in] lda Array of <tt>int</tt>. Each element lda[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. When transA[i] = BblasNoTrans then lda[i] must be at least max( 1, M[i] ), otherwise lda[i] must be at least max( 1, K[i] ). @param[in] arrayB Array of pointers. Each element arrayB[i] is a pointer to a COMPLEX matrix of dimension ldb[i] by Kb[i], where Kb[i] is N[i] when transB[i] = BblasNoTrans, and is K[i] otherwise. 
When using transB[i] = BblasNoTrans the leading K[i] by N[i] part of arrayB[i] must contain the matrix elements, otherwise the leading N[i] by K[i] part of arrayB[i] must contain the matrix elements. @param[in] ldb Array of <tt>int</tt>. Each element ldb[i] specifies the first dimension of arrayB[i] as declared in the calling (sub) program. When transB[i] = BblasNoTrans then ldb[i] must be at least max( 1, K[i] ), otherwise ldb[i] must be at least max( 1, N[i] ). @param[in] beta Array of <tt>complex_16</tt>. When beta[i] is set to zero arrayC[i] need not be set on input. @param[in,out] arrayC Array of pointers. Each element arrayC[i] is a pointer to a COMPLEX matrix of dimension ldc[i] by N[i]. Before entry, the leading M[i] by N[i] part of the arrayC[i] must contain a matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the matrix arrayC[i] is overwritten by the M[i] by N[i] matrix ( alpha[i]*op( arrayA[i] )*op( arrayB[i] ) + beta[i]*arrayC[i] ). @param[in] ldc Array of <tt>int</tt>. Each element ldc[i] specifies the first dimension of arrayC[i] as declared in the calling (sub) program. The value ldc[i] must be at least max( 1, M[i] ) @param[in] batch_count <tt>int</tt> The number of matrices to operate on. @param[in] batch_opts <tt>enum BBLAS_OPTS</tt> One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of batch operation required. @param[in,out] info Array of <tt>int</tt>. Each element info[i] is the error return code of the ith cgemm in the batch, these need not be set on entry. The error codes can be found in bblas_macros.h. 
**/ void omp_cgemm_batch( const enum BBLAS_TRANS *transA, const enum BBLAS_TRANS *transB, const int *M, const int *N, const int *K, const BBLAS_Complex32_t *alpha, const BBLAS_Complex32_t **arrayA, const int *lda, const BBLAS_Complex32_t **arrayB, const int *ldb, const BBLAS_Complex32_t *beta, BBLAS_Complex32_t **arrayC, const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info) { /*Local variables */ int first_index = 0; int LDA, LDB, batch_iter; char func_name[15] = "cgemm_batch"; /* Check input arguments */ if (batch_count < 0) { xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1); } if (batch_opts == BBLAS_FIXED) { if ((transA[first_index] != BblasNoTrans) && (transA[first_index] != BblasTrans) && (transA[first_index] != BblasConjTrans)) { xerbla_batch(func_name, BBLAS_ERR_TRANSA, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_TRANSA; } return; } if ((transB[first_index] != BblasNoTrans) && (transB[first_index] != BblasTrans) && (transB[first_index] != BblasConjTrans)) { xerbla_batch(func_name, BBLAS_ERR_TRANSB, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_TRANSB; } return; } if ( transA[first_index] == BblasNoTrans ) { LDA = M[first_index]; } else { LDA = K[first_index]; } if ( transB[first_index] == BblasNoTrans ) { LDB = K[first_index]; } else { LDB = N[first_index]; } if (M[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_M, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_M; } return; } if (N[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_N, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_N; } return; } if (K[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_K, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_K; } return; } if 
(lda[first_index] < max(1, LDA)) { xerbla_batch(func_name, BBLAS_ERR_LDA, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDA; } return; } if (ldb[first_index] < max(1, LDB)) { xerbla_batch(func_name, BBLAS_ERR_LDB, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDB; } return; } if (ldc[first_index] < max(1, M[first_index])) { xerbla_batch(func_name, BBLAS_ERR_LDC, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDC; } return; } /* particular case */ if (M[first_index] == 0 || N[first_index] == 0 || ((alpha[first_index] == (BBLAS_Complex32_t)0.0 || K[first_index] == 0) && beta[first_index] == (BBLAS_Complex32_t)1.0 )) { for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_SUCCESS; } return; } #pragma omp parallel for for (int batch_iter_omp = 0; batch_iter_omp < batch_count; batch_iter_omp++) { /*Call to cblas_cgemm */ cblas_cgemm( BblasColMajor, transA[first_index], transB[first_index], M[first_index], N[first_index], K[first_index], CBLAS_SADDR(alpha[first_index]), arrayA[batch_iter_omp], lda[first_index], arrayB[batch_iter_omp], ldb[first_index], CBLAS_SADDR(beta[first_index]), arrayC[batch_iter_omp], ldc[first_index]); /* Successful */ info[batch_iter_omp] = BBLAS_SUCCESS; } /*END FIXED SIZE FOR LOOP */ }else if (batch_opts == BBLAS_VARIABLE) { #pragma omp parallel for private(LDA, LDB) for (int batch_iter_omp = 0; batch_iter_omp < batch_count; batch_iter_omp++) { /* Check input arguments */ if ((transA[batch_iter_omp] != BblasNoTrans) && (transA[batch_iter_omp] != BblasTrans) && (transA[batch_iter_omp] != BblasConjTrans)) { xerbla_batch(func_name, BBLAS_ERR_TRANSA, batch_iter_omp); info[batch_iter_omp] = BBLAS_ERR_TRANSA; continue; } if ((transB[batch_iter_omp] != BblasNoTrans) && (transB[batch_iter_omp] != BblasTrans) && (transB[batch_iter_omp] != 
BblasConjTrans)) { xerbla_batch(func_name, BBLAS_ERR_TRANSB, batch_iter_omp); info[batch_iter_omp] = BBLAS_ERR_TRANSB; continue; } if (transA[batch_iter_omp] == BblasNoTrans) { LDA = M[batch_iter_omp]; } else { LDA = K[batch_iter_omp]; } if (transB[batch_iter_omp] == BblasNoTrans) { LDB = K[batch_iter_omp]; } else { LDB = N[batch_iter_omp]; } if (M[batch_iter_omp] < 0) { xerbla_batch(func_name, BBLAS_ERR_M, batch_iter_omp); info[batch_iter_omp] = BBLAS_ERR_M; continue; } if (N[batch_iter_omp] < 0) { xerbla_batch(func_name, BBLAS_ERR_N, batch_iter_omp); info[batch_iter_omp] = BBLAS_ERR_N; continue; } if (K[batch_iter_omp] < 0) { xerbla_batch(func_name, BBLAS_ERR_K, batch_iter_omp); info[batch_iter_omp] = BBLAS_ERR_K; continue; } if (lda[batch_iter_omp] < max(1, LDA)) { xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter_omp); info[batch_iter_omp] = BBLAS_ERR_LDA; continue; } if (ldb[batch_iter_omp] < max(1, LDB)) { xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter_omp); info[batch_iter_omp] = BBLAS_ERR_LDB; continue; } if (ldc[batch_iter_omp] < max(1, M[batch_iter_omp])) { xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter_omp); info[batch_iter_omp] = BBLAS_ERR_LDC; continue; } /* particular case */ if (M[batch_iter_omp] == 0 || N[batch_iter_omp] == 0 || ((alpha[batch_iter_omp] == (BBLAS_Complex32_t)0.0 || K[batch_iter_omp] == 0) && beta[batch_iter_omp] == (BBLAS_Complex32_t)1.0)) { info[batch_iter_omp] = BBLAS_SUCCESS; continue; } cblas_cgemm( BblasColMajor, transA[batch_iter_omp], transB[batch_iter_omp], M[batch_iter_omp], N[batch_iter_omp], K[batch_iter_omp], CBLAS_SADDR(alpha[batch_iter_omp]), arrayA[batch_iter_omp], lda[batch_iter_omp], arrayB[batch_iter_omp], ldb[batch_iter_omp], CBLAS_SADDR(beta[batch_iter_omp]), arrayC[batch_iter_omp], ldc[batch_iter_omp]); /* Successful */ info[batch_iter_omp] = BBLAS_SUCCESS; } } else { xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1); } } #undef COMPLEX
Example_nested_loop.1.c
/* * @@name: nested_loop.1c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success */ void work(int i, int j) {} void good_nesting(int n) { int i, j; #pragma omp parallel default(shared) { #pragma omp for for (i=0; i<n; i++) { #pragma omp parallel shared(i, n) { #pragma omp for for (j=0; j < n; j++) work(i, j); } } } }
GB_kroner.c
//------------------------------------------------------------------------------ // GB_kroner: Kronecker product, C = kron (A,B) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // C = kron(A,B) where op determines the binary multiplier to use. The type of // A and B are compatible with the x and y inputs of z=op(x,y), but can be // different. The type of C is the type of z. C is hypersparse if either A // or B are hypersparse. // FUTURE: GB_kron would be faster with built-in types and operators. // FUTURE: at most one thread is used for each vector of C=kron(A,B). The // matrix C is normally very large, but if both A and B are n-by-1, then C is // n^2-by-1 and only a single thread is used. A better method for this case // would construct vectors of C in parallel. // FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not // accounted for in the parallel load-balancing. 
#include "GB_kron.h" GrB_Info GB_kroner // C = kron (A,B) ( GrB_Matrix *Chandle, // output matrix const bool C_is_csc, // desired format of C const GrB_BinaryOp op, // multiply operator const GrB_Matrix A, // input matrix const GrB_Matrix B, // input matrix GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (Chandle != NULL) ; ASSERT_OK (GB_check (A, "A for kron (A,B)", GB0)) ; ASSERT_OK (GB_check (B, "B for kron (A,B)", GB0)) ; ASSERT_OK (GB_check (op, "op for kron (A,B)", GB0)) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GrB_Info info ; (*Chandle) = NULL ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; const GB_void *restrict Ax = A->x ; const int64_t asize = A->type->size ; const int64_t avlen = A->vlen ; const int64_t avdim = A->vdim ; int64_t anvec = A->nvec ; int64_t anz = GB_NNZ (A) ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int64_t *restrict Bi = B->i ; const GB_void *restrict Bx = B->x ; const int64_t bsize = B->type->size ; const int64_t bvlen = B->vlen ; const int64_t bvdim = B->vdim ; int64_t bnvec = B->nvec ; int64_t bnz = GB_NNZ (B) ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- double work = ((double) anz) * ((double) bnz) + (((double) anvec) * ((double) bnvec)) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (work, chunk, nthreads_max) ; 
//-------------------------------------------------------------------------- // allocate the output matrix C //-------------------------------------------------------------------------- // C has the same type as z for the multiply operator, z=op(x,y) GrB_Index cvlen, cvdim, cnzmax, cnvec ; bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ; ok = ok & GB_Index_multiply (&cvdim, avdim, bvdim) ; ok = ok & GB_Index_multiply (&cnzmax, anz, bnz) ; ok = ok & GB_Index_multiply (&cnvec, anvec, bnvec) ; ASSERT (ok) ; // C is hypersparse if either A or B are hypersparse bool C_is_hyper = (cvdim > 1) && (A->is_hyper || B->is_hyper) ; GrB_Matrix C = NULL ; // allocate a new header for C GB_CREATE (&C, op->ztype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc, GB_SAME_HYPER_AS (C_is_hyper), B->hyper_ratio, cnvec, cnzmax, true, Context) ; if (info != GrB_SUCCESS) { // out of memory return (info) ; } //-------------------------------------------------------------------------- // get C //-------------------------------------------------------------------------- int64_t *restrict Cp = C->p ; int64_t *restrict Ch = C->h ; int64_t *restrict Ci = C->i ; GB_void *restrict Cx = C->x ; const int64_t csize = C->type->size ; GxB_binary_function fmult = op->function ; GB_cast_function cast_A = GB_cast_factory (op->xtype->code, A->type->code), cast_B = GB_cast_factory (op->ytype->code, B->type->code) ; //-------------------------------------------------------------------------- // compute the column counts of C, and C->h if C is hypersparse //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2) for (int64_t kA = 0 ; kA < anvec ; kA++) { for (int64_t kB = 0 ; kB < bnvec ; kB++) { // get A(:,jA), the (kA)th vector of A int64_t jA = (Ah == NULL) ? kA : Ah [kA] ; int64_t aknz = Ap [kA+1] - Ap [kA] ; // get B(:,jB), the (kB)th vector of B int64_t jB = (Bh == NULL) ? 
kB : Bh [kB] ; int64_t bknz = Bp [kB+1] - Bp [kB] ; // determine # entries in C(:,jC), the (kC)th vector of C int64_t kC = kA * bnvec + kB ; Cp [kC] = aknz * bknz ; if (C_is_hyper) { Ch [kC] = jA * bvdim + jB ; } } } //-------------------------------------------------------------------------- // replace Cp with its cumulative sum //-------------------------------------------------------------------------- GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ; if (C_is_hyper) C->nvec = cnvec ; C->magic = GB_MAGIC ; //-------------------------------------------------------------------------- // C = kron (A,B) //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2) for (int64_t kA = 0 ; kA < anvec ; kA++) { for (int64_t kB = 0 ; kB < bnvec ; kB++) { // get B(:,jB), the (kB)th vector of B int64_t pB_start = Bp [kB] ; int64_t pB_end = Bp [kB+1] ; int64_t bknz = pB_start - pB_end ; if (bknz == 0) continue ; GB_void bwork [bsize] ; // get C(:,jC), the (kC)th vector of C int64_t kC = kA * bnvec + kB ; int64_t pC = Cp [kC] ; // get A(:,jA), the (kA)th vector of A int64_t pA_start = Ap [kA] ; int64_t pA_end = Ap [kA+1] ; GB_void awork [asize] ; for (int64_t pA = pA_start ; pA < pA_end ; pA++) { // awork = A(iA,jA), typecasted to op->xtype int64_t iA = Ai [pA] ; int64_t iAblock = iA * bvlen ; cast_A (awork, Ax +(pA*asize), asize) ; for (int64_t pB = pB_start ; pB < pB_end ; pB++) { // bwork = B(iB,jB), typecasted to op->ytype int64_t iB = Bi [pB] ; cast_B (bwork, Bx +(pB*bsize), bsize) ; // C(iC,jC) = A(iA,jA) * B(iB,jB) int64_t iC = iAblock + iB ; Ci [pC] = iC ; fmult (Cx +(pC*csize), awork, bwork) ; pC++ ; } } } } //-------------------------------------------------------------------------- // remove empty vectors from C, if hypersparse //-------------------------------------------------------------------------- if (C_is_hyper && C->nvec_nonempty < cnvec) { // create new Cp_new 
and Ch_new arrays, with no empty vectors int64_t *restrict Cp_new = NULL ; int64_t *restrict Ch_new = NULL ; int64_t nvec_new ; info = GB_hyper_prune (&Cp_new, &Ch_new, &nvec_new, C->p, C->h, cnvec, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_MATRIX_FREE (&C) ; return (info) ; } // transplant the new hyperlist into C GB_FREE_MEMORY (C->p, cnvec+1, sizeof (int64_t)) ; GB_FREE_MEMORY (C->h, cnvec, sizeof (int64_t)) ; C->p = Cp_new ; C->h = Ch_new ; C->nvec = nvec_new ; C->plen = nvec_new ; ASSERT (C->nvec == C->nvec_nonempty) ; } ASSERT (C->nvec_nonempty == GB_nvec_nonempty (C, Context)) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- ASSERT_OK (GB_check (C, "C=kron(A,B)", GB0)) ; (*Chandle) = C ; return (GrB_SUCCESS) ; }
GB_binop__max_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__max_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__max_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint32) // A*D function (colscale): GB (_AxD__max_uint32) // D*A function (rowscale): GB (_DxB__max_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__max_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__max_uint32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint32) // C=scalar+B GB (_bind1st__max_uint32) // C=scalar+B' GB (_bind1st_tran__max_uint32) // C=A+scalar GB (_bind2nd__max_uint32) // C=A'+scalar GB (_bind2nd_tran__max_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IMAX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_UINT32 || GxB_NO_MAX_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__max_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__max_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, 
const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__max_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
par_lr_interp.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ #include "_hypre_parcsr_ls.h" #include "aux_interp.h" #define MAX_C_CONNECTIONS 100 #define HAVE_COMMON_C 1 /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildStdInterp * Comment: The interpolatory weighting can be changed with the sep_weight * variable. This can enable not separating negative and positive * off diagonals in the weight formula. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int sep_weight, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int 
num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; HYPRE_Int *ihat = NULL; HYPRE_Int *ihat_offd = NULL; HYPRE_Int *ipnt = NULL; HYPRE_Int *ipnt_offd = NULL; HYPRE_Int strong_f_marker = -2; /* Interpolation weight variables */ HYPRE_Real *ahat = NULL; HYPRE_Real *ahat_offd = NULL; HYPRE_Real 
sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C; HYPRE_Real diagonal, distribute; HYPRE_Real alfa = 1.; HYPRE_Real beta = 1.; /* Loop variables */ // HYPRE_Int index; HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, j1, jj, kk, k1; HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; HYPRE_Real wall_1 = 0; HYPRE_Real wall_2 = 0; HYPRE_Real wall_3 = 0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag== 4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 0); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] >= 0) { if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] >= 0) { if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } /* Initialize ahat, which is a modification to a, used in the standard * interpolation routine. 
*/ if (n_fine) { ahat = hypre_CTAlloc(HYPRE_Real, n_fine); ihat = hypre_CTAlloc(HYPRE_Int, n_fine); ipnt = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes); ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; ahat[i] = 0; ihat[i] = -1; } for (i = 0; i < full_off_procNodes; i++) { P_marker_offd[i] = -1; ahat_offd[i] = 0; ihat_offd[i] = -1; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if(num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = i1; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = k1; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd]=i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(CF_marker[loc_col] >= 0) { if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = loc_col; P_diag_data[jj_counter] = zero; jj_counter++; } } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] >= 0) { if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; 
wall_1 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); cnt_c = 0; cnt_f = jj_end_row-jj_begin_row; cnt_c_offd = 0; cnt_f_offd = jj_end_row_offd-jj_begin_row_offd; ihat[i] = cnt_f; ipnt[cnt_f] = i; ahat[cnt_f++] = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is direct neighbor */ i1 = A_diag_j[jj]; if (P_marker[i1] != strong_f_marker) { indx = ihat[i1]; if (indx > -1) ahat[indx] += A_diag_data[jj]; else if (P_marker[i1] >= jj_begin_row) { ihat[i1] = cnt_c; ipnt[cnt_c] = i1; ahat[cnt_c++] += A_diag_data[jj]; } else if (CF_marker[i1] != -3) { ihat[i1] = cnt_f; ipnt[cnt_f] = i1; ahat[cnt_f++] += A_diag_data[jj]; } } else { if(num_functions == 1 || dof_func[i] == dof_func[i1]) { distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]]; for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++) { k1 = A_diag_j[kk]; indx = ihat[k1]; if (indx > -1) ahat[indx] -= A_diag_data[kk]*distribute; else if (P_marker[k1] >= jj_begin_row) { ihat[k1] = cnt_c; ipnt[cnt_c] = k1; ahat[cnt_c++] -= A_diag_data[kk]*distribute; } else { ihat[k1] = cnt_f; ipnt[cnt_f] = k1; ahat[cnt_f++] -= A_diag_data[kk]*distribute; } } if(num_procs > 1) { for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++) { k1 = A_offd_j[kk]; indx = ihat_offd[k1]; if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1]) { if (indx > -1) ahat_offd[indx] -= A_offd_data[kk]*distribute; else if (P_marker_offd[k1] >= jj_begin_row_offd) { ihat_offd[k1] = cnt_c_offd; ipnt_offd[cnt_c_offd] = k1; ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute; } else { ihat_offd[k1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = k1; ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute; } } } } } } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] != strong_f_marker) { indx = ihat_offd[i1]; if (indx > -1) ahat_offd[indx] += A_offd_data[jj]; else if (P_marker_offd[i1] >= jj_begin_row_offd) { ihat_offd[i1] = 
cnt_c_offd; ipnt_offd[cnt_c_offd] = i1; ahat_offd[cnt_c_offd++] += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { ihat_offd[i1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = i1; ahat_offd[cnt_f_offd++] += A_offd_data[jj]; } } else { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]]; for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++) { k1 = A_ext_j[kk]; if(k1 >= col_1 && k1 < col_n) { /*diag*/ loc_col = k1 - col_1; indx = ihat[loc_col]; if (indx > -1) ahat[indx] -= A_ext_data[kk]*distribute; else if (P_marker[loc_col] >= jj_begin_row) { ihat[loc_col] = cnt_c; ipnt[cnt_c] = loc_col; ahat[cnt_c++] -= A_ext_data[kk]*distribute; } else { ihat[loc_col] = cnt_f; ipnt[cnt_f] = loc_col; ahat[cnt_f++] -= A_ext_data[kk]*distribute; } } else { loc_col = -k1 - 1; if(num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1]) { indx = ihat_offd[loc_col]; if (indx > -1) ahat_offd[indx] -= A_ext_data[kk]*distribute; else if(P_marker_offd[loc_col] >= jj_begin_row_offd) { ihat_offd[loc_col] = cnt_c_offd; ipnt_offd[cnt_c_offd] = loc_col; ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute; } else { ihat_offd[loc_col] = cnt_f_offd; ipnt_offd[cnt_f_offd] = loc_col; ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_2 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); diagonal = ahat[cnt_c]; ahat[cnt_c] = 0; sum_pos = 0; sum_pos_C = 0; sum_neg = 0; sum_neg_C = 0; sum = 0; sum_C = 0; if(sep_weight == 1) { for (jj=0; jj < cnt_c; jj++) { if (ahat[jj] > 0) { sum_pos_C += ahat[jj]; } else { sum_neg_C += ahat[jj]; } } if(num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos_C += ahat_offd[jj]; } else { sum_neg_C += ahat_offd[jj]; } } } sum_pos = sum_pos_C; sum_neg = sum_neg_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { if (ahat[jj] > 0) { sum_pos += ahat[jj]; } 
else { sum_neg += ahat[jj]; } ahat[jj] = 0; } if(num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos += ahat_offd[jj]; } else { sum_neg += ahat_offd[jj]; } ahat_offd[jj] = 0; } } if (sum_neg_C*diagonal) alfa = sum_neg/sum_neg_C/diagonal; if (sum_pos_C*diagonal) beta = sum_pos/sum_pos_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; if (ahat[j1] > 0) P_diag_data[jj] = -beta*ahat[j1]; else P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if(num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; if (ahat_offd[j1] > 0) P_offd_data[jj] = -beta*ahat_offd[j1]; else P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } else { for (jj=0; jj < cnt_c; jj++) { sum_C += ahat[jj]; } if(num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { sum_C += ahat_offd[jj]; } } sum = sum_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { sum += ahat[jj]; ahat[jj] = 0; } if(num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { sum += ahat_offd[jj]; ahat_offd[jj] = 0; } } if (sum_C*diagonal) alfa = sum/sum_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if(num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_3 += wall_time; fflush(NULL); } } } if (debug_flag==4) { hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n", my_id, wall_1, wall_2, wall_3); fflush(NULL); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse); hypre_TFree(P_marker); hypre_TFree(ahat); hypre_TFree(ihat); hypre_TFree(ipnt); if (full_off_procNodes) { hypre_TFree(ahat_offd); hypre_TFree(ihat_offd); hypre_TFree(ipnt_offd); } if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd); hypre_TFree(P_marker_offd); hypre_TFree(CF_marker_offd); hypre_TFree(tmp_CF_marker_offd); if(num_functions > 1) hypre_TFree(dof_func_offd); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildExtPIInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime(); #endif /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = 
hypre_CSRMatrixJ(A_offd);
   /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
     HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this process's global column ownership range */
   HYPRE_Int        col_n = col_1 + local_numrows;
   HYPRE_Int        total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix    *P_diag;
   hypre_CSRMatrix    *P_offd;
   HYPRE_Real         *P_diag_data = NULL;
   HYPRE_Int          *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real         *P_offd_data = NULL;
   HYPRE_Int          *P_offd_i, *P_offd_j = NULL;
   /*HYPRE_Int        *col_map_offd_P = NULL;*/
   HYPRE_Int           P_diag_size;
   HYPRE_Int           P_offd_size;
   /* per-thread scratch arrays marking which columns a row has already
    * claimed (see the counter comparisons in the two passes below) */
   HYPRE_Int          *P_marker = NULL;
   HYPRE_Int          *P_marker_offd = NULL;
   HYPRE_Int          *CF_marker_offd = NULL;
   HYPRE_Int          *tmp_CF_marker_offd = NULL;
   HYPRE_Int          *dof_func_offd = NULL;

   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real      *A_ext_data;
   HYPRE_Int       *A_ext_i;
   /* A_ext_j/Sop_j hold global columns; columns not in [col_1,col_n)
    * apparently arrive encoded as -(loc_col+1) — see the  -k1 - 1
    * decodes below (TODO confirm against hypre_exchange_interp_data) */
   HYPRE_Int       *A_ext_j;
   HYPRE_Int       *fine_to_coarse = NULL;
   HYPRE_Int       *fine_to_coarse_offd = NULL;
   HYPRE_Int        loc_col;
   HYPRE_Int        full_off_procNodes;

   hypre_CSRMatrix *Sop;
   HYPRE_Int       *Sop_i;
   HYPRE_Int       *Sop_j;
   HYPRE_Int        sgn = 1;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int        jj_begin_row, jj_end_row;
   HYPRE_Int        jj_begin_row_offd = 0;
   HYPRE_Int        jj_end_row_offd = 0;
   HYPRE_Int        coarse_counter;

   /* Interpolation weight variables */
   HYPRE_Real       sum, diagonal, distribute;
   HYPRE_Int        strong_f_marker;

   /* Loop variables */
   /*HYPRE_Int      index;*/
   HYPRE_Int        start_indexing = 0;
   HYPRE_Int        i, i1, i2, jj, kk, k1, jj1;

   /* Threading variables */
   HYPRE_Int my_thread_num, num_threads, start, stop;
   HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1);
   HYPRE_Int * diag_offset;
   HYPRE_Int * fine_to_coarse_offset;
   HYPRE_Int * offd_offset;

   /* Definitions */
   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;
   HYPRE_Real       wall_time;

   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   my_first_cpt = num_cpts_global[0];
   /* last rank knows the global C-point total; broadcast it to all */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      hypre_exchange_interp_data(
            &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop,
            &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1);
      /* NOTE(review): bare block around the profile update below looks like
       * a leftover wrapper — harmless, but verify intent */
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixJ(Sop);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } /* This function is smart enough to check P_marker and P_marker_offd only, * and set them if they are not NULL. The other vectors are set regardless.*/ hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); /*----------------------------------------------------------------------- * Initialize threading variables *-----------------------------------------------------------------------*/ max_num_threads[0] = hypre_NumThreads(); diag_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]); fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]); offd_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]); for(i=0; i < max_num_threads[0]; i++) { diag_offset[i] = 0; fine_to_coarse_offset[i] = 0; offd_offset[i] = 0; } /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker) #endif { /* Parallelize by computing only over each thread's range of rows. * * The first large for loop computes ~locally~ for each thread P_diag_i, * P_offd_i and fine_to_coarse. Then, the arrays are stitched together * For eaxample the first phase would compute * P_diag_i = [0, 2, 4, 7, 2, 5, 6] * for two threads. 
P_diag_i[stop] points to the end of that
       * thread's data, but P_diag_i[start] points to the end of the
       * previous thread's row range.  This is then stitched together at the
       * end to yield,
       * P_diag_i = [0, 2, 4, 7, 9, 14, 15].
       *
       * The second large for loop computes interpolation weights and is
       * relatively straight-forward to thread. */

      /* initialize thread-wise variables */
      strong_f_marker = -2;
      coarse_counter = 0;
      jj_counter = start_indexing;
      jj_counter_offd = start_indexing;
      if (n_fine)
      {
         /* P_marker[col] >= P_diag_i[i] means col was already counted for
          * the current row i (counter values grow monotonically) */
         P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
         for (i = 0; i < n_fine; i++)
         {
            P_marker[i] = -1;
         }
      }
      if (full_off_procNodes)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
         for (i = 0; i < full_off_procNodes; i++)
         {
            P_marker_offd[i] = -1;
         }
      }

      /* this thread's row range: [start, stop) */
      my_thread_num = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      start = (n_fine/num_threads)*my_thread_num;
      if (my_thread_num == num_threads-1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine/num_threads)*(my_thread_num+1);
      }

      /* loop over rows: count, thread-locally, the interpolation points of
       * each row and assign local coarse numbers to C-points */
      for (i = start; i < stop; i++)
      {
         P_diag_i[i] = jj_counter;
         if (num_procs > 1)
            P_offd_i[i] = jj_counter_offd;

         if (CF_marker[i] >= 0)
         {
            /* C-point: interpolates only from itself */
            jj_counter++;
            fine_to_coarse[i] = coarse_counter;
            coarse_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i, or C-points that strongly influence F-points
          * that strongly influence i.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] != -3)
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  /* i1 is a C point */
                  if (P_marker[i1] < P_diag_i[i])
                  {
                     P_marker[i1] = jj_counter;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               {
                  /* i1 is a F point, loop through its strong neighbors */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if(P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if(P_marker_offd[k1] < P_offd_i[i])
                           {
                              /* remember that this off-proc column is used */
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            /* Look at off diag strong connections of i */
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     if(P_marker_offd[i1] < P_offd_i[i])
                     {
                        tmp_CF_marker_offd[i1] = 1;
                        P_marker_offd[i1] = jj_counter_offd;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     /* F point; look at neighbors of i1. Sop contains global col
                      * numbers and entries that could be in S_diag or S_offd or
                      * neither. */
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        k1 = Sop_j[kk];
                        if(k1 >= col_1 && k1 < col_n)
                        {
                           /* In S_diag */
                           loc_col = k1-col_1;
                           if(P_marker[loc_col] < P_diag_i[i])
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           /* off-proc column, decode -(loc_col+1) */
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] < P_offd_i[i])
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
      }
      /*-----------------------------------------------------------------------
       * End loop over fine grid.
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif P_diag_i[stop] = jj_counter; P_offd_i[stop] = jj_counter_offd; fine_to_coarse_offset[my_thread_num] = coarse_counter; diag_offset[my_thread_num] = jj_counter; offd_offset[my_thread_num] = jj_counter_offd; /* Stitch P_diag_i, P_offd_i and fine_to_coarse together */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num == 0) { /* Calculate the offset for P_diag_i and P_offd_i for each thread */ for (i = 1; i < num_threads; i++) { diag_offset[i] = diag_offset[i-1] + diag_offset[i]; fine_to_coarse_offset[i] = fine_to_coarse_offset[i-1] + fine_to_coarse_offset[i]; offd_offset[i] = offd_offset[i-1] + offd_offset[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num > 0) { /* update row pointer array with offset, * making sure to update the row stop index */ for (i = start+1; i <= stop; i++) { P_diag_i[i] += diag_offset[my_thread_num-1]; P_offd_i[i] += offd_offset[my_thread_num-1]; } /* update fine_to_coarse by offsetting with the offset * from the preceding thread */ for (i = start; i < stop; i++) { if(fine_to_coarse[i] >= 0) { fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num-1]; } } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num == 0) { if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size); } } /* Fine to coarse mapping */ if(num_procs > 1 && my_thread_num == 0) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = start; i < stop; i++) { jj_begin_row = P_diag_i[i]; jj_begin_row_offd = P_offd_i[i]; jj_counter = jj_begin_row; jj_counter_offd = jj_begin_row_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/
         else if (CF_marker[i] != -3)
         {
            /* a value < -2, unique to this row, tags strong F-neighbors */
            strong_f_marker--;

            /* First: lay out the sparsity of row i (same walk as pass one),
             * initializing every interpolation weight to zero. */
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  if (P_marker[i1] < jj_begin_row)
                  {
                     P_marker[i1] = jj_counter;
                     P_diag_j[jj_counter] = fine_to_coarse[i1];
                     P_diag_data[jj_counter] = zero;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               {
                  /* strong F-neighbor: tag it, and add its strong C-points
                   * to row i's interpolation set (distance-two points) */
                  P_marker[i1] = strong_f_marker;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if(P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if(CF_marker_offd[k1] >= 0)
                        {
                           if(P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            if ( num_procs > 1)
            {
               for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1];
                  if ( CF_marker_offd[i1] >= 0)
                  {
                     if(P_marker_offd[i1] < jj_begin_row_offd)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     P_marker_offd[i1] = strong_f_marker;
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        k1 = Sop_j[kk];
                        /* Find local col number */
                        if(k1 >= col_1 && k1 < col_n)
                        {
                           loc_col = k1-col_1;
                           if(P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd]=loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }

            jj_end_row = jj_counter;
            jj_end_row_offd = jj_counter_offd;

            /* Second: accumulate the weights. diagonal collects a_ii plus
             * weak and undistributable connections. */
            diagonal = A_diag_data[A_diag_i[i]];

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               /* i1 is a c-point and strongly influences i, accumulate
                * a_(i,i1) into interpolation weight */
               i1 = A_diag_j[jj];
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               else if(P_marker[i1] == strong_f_marker)
               {
                  /* strong F-neighbor: distribute a_(i,i1) among the
                   * interpolation points of row i1 (extended+i: row i's own
                   * diagonal also receives a share via the i2 == i terms);
                   * only entries opposite in sign to a_(i1,i1) participate */
                  sum = zero;
                  sgn = 1;
                  if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i. */
                  for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
                        sum += A_diag_data[jj1];
                  }
                  if(num_procs > 1)
                  {
                     for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                           sum += A_offd_data[jj1];
                     }
                  }
                  if(sum != 0)
                  {
                     distribute = A_diag_data[jj]/sum;
                     /* Loop over row of A for point i1 and do the distribution */
                     for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                           P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1];
                        if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
                           diagonal += distribute*A_diag_data[jj1];
                     }
                     if(num_procs > 1)
                     {
                        for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1];
                        }
                     }
                  }
                  else
                  {
                     /* nothing to distribute to: lump into the diagonal */
                     diagonal += A_diag_data[jj];
                  }
               }
               /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
                * diagonal */
               else if (CF_marker[i1] != -3)
               {
                  if(num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }
            if(num_procs > 1)
            {
               for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if(P_marker_offd[i1] >= jj_begin_row_offd)
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  else if(P_marker_offd[i1] == strong_f_marker)
                  {
                     /* off-proc strong F-neighbor: distribute using its full
                      * row from A_ext (note: no sign filter here, unlike the
                      * on-processor branch above) */
                     sum = zero;
                     for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        k1 = A_ext_j[jj1];
                        if(k1 >= col_1 && k1 < col_n)
                        {
                           /* diag */
                           loc_col = k1 - col_1;
                           if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
                              sum += A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                              sum += A_ext_data[jj1];
                        }
                     }
                     if(sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;
                        for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                        {
                           k1 = A_ext_j[jj1];
                           if(k1 >= col_1 && k1 < col_n)
                           {
                              /* diag */
                              loc_col = k1 - col_1;
                              if(P_marker[loc_col] >= jj_begin_row)
                                 P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1];
                              if(loc_col == i)
                                 diagonal += distribute*A_ext_data[jj1];
                           }
                           else
                           {
                              loc_col = -k1 - 1;
                              if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                                 P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the (negated) diagonal.
             *-----------------------------------------------------------------*/
            if (diagonal)
            {
               for(jj = jj_begin_row; jj < jj_end_row; jj++)
                  P_diag_data[jj] /= -diagonal;
               for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
                  P_offd_data[jj] /= -diagonal;
            }
         }
         /* advance the row-unique F-marker before the next row */
         strong_f_marker--;
      }
      /*-----------------------------------------------------------------------
       * End large for loop over nfine
       *-----------------------------------------------------------------------*/
      if (n_fine)
      {
         hypre_TFree(P_marker);
      }
      if (full_off_procNodes)
      {
         hypre_TFree(P_marker_offd);
      }
   }
   /*-----------------------------------------------------------------------
    * End PAR_REGION
    *-----------------------------------------------------------------------*/

   if (debug_flag==4)
   {
      wall_time =
time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* P takes ownership of the i/j/data arrays built above; row starts come
    * from A's col_starts and num_cpts_global, so P must not free them
    * (OwnsRowStarts = 0 below) */
   P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts, hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global, 0, P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
#ifdef HYPRE_PROFILE
      /* pause this timer around truncation (it is timed separately) */
      hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
      /* truncation reallocates: refresh all local views */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if(P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd,
                                fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* restore special F-points (-3) to ordinary F-points (-1) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory (P_marker/P_marker_offd were freed per thread
    * inside the parallel region) */
   hypre_TFree(max_num_threads);
   hypre_TFree(fine_to_coarse);
   hypre_TFree(diag_offset);
   hypre_TFree(offd_offset);
   hypre_TFree(fine_to_coarse_offset);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      if(num_functions > 1)
         hypre_TFree(dof_func_offd);
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPICCInterp
 * Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = 
NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int **ext_p, **ext_p_offd;*/ /*HYPRE_Int ccounter_offd; HYPRE_Int *clist_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; HYPRE_Int sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; /*HYPRE_Int ccounter; HYPRE_Int *clist, ccounter;*/ /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, 
A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } /*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS); for(i = 0; i < MAX_C_CONNECTIONS; i++) clist[i] = 0; if(num_procs > 1) { clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS); for(i = 0; i < MAX_C_CONNECTIONS; i++) clist_offd[i] = 0; }*/ hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /* Initialize ccounter for each f point */ /*ccounter = 0; ccounter_offd = 0;*/ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { /*clist[ccounter++] = i1;*/ P_marker[i1] = jj_counter; jj_counter++; } } } /*qsort0(clist,0,ccounter-1);*/ if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < P_offd_i[i]) { /*clist_offd[ccounter_offd++] = i1;*/ tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } /*qsort0(clist_offd,0,ccounter_offd-1);*/ } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { /*if(hypre_BinarySearch(clist,k1,ccounter) >= 0) {*/ common_c = 1; break; /*kk = S_diag_i[i1+1]; }*/ } } if(num_procs > 1 && common_c == 0) { 
/* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ /*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = S_offd_i[i1+1]; }*/ } } } if(!common_c) { /* No common c point, extend the interp set */ for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if(CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; /*break;*/ } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; /*break;*/ } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] == -1) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { /*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { /*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; /*break;*/ } } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; /*break;*/ } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if(num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; /*clist[ccounter++] = i1;*/ } } } /*qsort0(clist,0,ccounter-1);*/ if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*clist_offd[ccounter_offd++] = i1;*/ } } } /*qsort0(clist_offd,0,ccounter_offd-1);*/ } for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if(CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { /*if(hypre_BinarySearch(clist,k1,ccounter) >= 0) {*/ common_c = 1; break; /*kk = S_diag_i[i1+1]; }*/ } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ /*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = S_offd_i[i1+1]; }*/ } } } 
if(!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; /*break;*/ } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*break;*/ } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if(CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { /*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { /*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; /* Find local col number */ if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; /*break;*/ } } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*break;*/ } } } } } } } 
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; if(i2 == i && (sgn*A_diag_data[jj1]) < 0) diagonal += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) 
P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; sgn = 1; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row || loc_col == i) sum += A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; if(loc_col == i) diagonal += distribute*A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; 
hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse); hypre_TFree(P_marker); /*hypre_TFree(clist);*/ if (num_procs > 1) { /*hypre_TFree(clist_offd);*/ hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd); hypre_TFree(P_marker_offd); hypre_TFree(CF_marker_offd); hypre_TFree(tmp_CF_marker_offd); if(num_functions > 1) hypre_TFree(dof_func_offd); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildFFInterp * Comment: Only use FF when there is no common c point. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = 
NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int ccounter_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; HYPRE_Int sgn = 1; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; /*HYPRE_Int ccounter; HYPRE_Int *clist, ccounter;*/ /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef 
HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else { /* Initialize ccounter for each f point */ /*ccounter = 0; ccounter_offd = 0;*/ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] < 0) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { common_c = 1; break; } } } if(!common_c) { /* No common c point, extend the interp set */ for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if(CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; 
jj_counter_offd++; } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] < 0) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ jj_begin_row_offd = 0; for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if(num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if(CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { common_c = 1; break; } } } if(!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = 
S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if(CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; /* Find local col number */ if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == 
dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row) sum += A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ 
if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse); hypre_TFree(P_marker); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd); hypre_TFree(P_marker_offd); hypre_TFree(CF_marker_offd); hypre_TFree(tmp_CF_marker_offd); if(num_functions > 1) hypre_TFree(dof_func_offd); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildFF1Interp * Comment: Only use FF when there is no common c point. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = 
NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int ccounter_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; HYPRE_Int sgn = 1; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; /*HYPRE_Int ccounter;*/ HYPRE_Int found_c = 0; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef 
HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else { /* Initialize ccounter for each f point */ /*ccounter = 0; ccounter_offd = 0;*/ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] < 0) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ common_c = 1; break; } } } if(!common_c) { /* No common c point, extend the interp set */ found_c = 0; for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if(CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; found_c = 1; break; } } } if(num_procs > 1 && !found_c) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { 
if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; break; } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] < 0) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; break; } } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; break; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if(num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if(CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ jj_begin_row_offd = 0; for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if(num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if(CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if(num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ common_c = 1; break; } } } if(!common_c) { /* No common c point, extend the interp set */ found_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; 
jj_counter++; found_c = 1; break; } } } if(num_procs > 1 && !found_c) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; break; } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if(CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -k1 - 1; if(CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if(!common_c) { for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; /* Find local col number */ if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; break; } } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; break; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if(col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == 
dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row) sum += A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ 
if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse); hypre_TFree(P_marker); /*hynre_TFree(clist);*/ if (num_procs > 1) { /*hypre_TFree(clist_offd);*/ hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd); hypre_TFree(P_marker_offd); hypre_TFree(CF_marker_offd); hypre_TFree(tmp_CF_marker_offd); if(num_functions > 1) hypre_TFree(dof_func_offd); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildExtInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); 
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_Int *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_Int *Sop_j; HYPRE_Int sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; 
HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; if(k1 >= col_1 && k1 < col_n) { /* In S_diag */ loc_col = k1-col_1; if(P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { for (i = 0; i < n_fine; i++) fine_to_coarse[i] += my_first_cpt; hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, fine_to_coarse_offd); for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 
= col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { k1 = Sop_j[kk]; /* Find local col number */ if(k1 >= col_1 && k1 < col_n) { loc_col = k1-col_1; if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. 
*/ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if((P_marker[i2] >= jj_begin_row ) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row ) sum += A_ext_data[jj1]; } else { loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { k1 = A_ext_j[jj1]; if(k1 >= col_1 && k1 < col_n) { /* diag */ loc_col = k1 - col_1; if(P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { 
loc_col = -k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse); hypre_TFree(P_marker); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd); hypre_TFree(P_marker_offd); hypre_TFree(CF_marker_offd); hypre_TFree(tmp_CF_marker_offd); if(num_functions > 1) hypre_TFree(dof_func_offd); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; }
GB_bitmap_AxB_saxpy_A_sparse_B_bitmap_template.c
//------------------------------------------------------------------------------
// GB_bitmap_AxB_saxpy_A_sparse_B_bitmap: C<#M>+=A*B, C bitmap, M any format
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// NOTE(review): this is a template body included from a driver that defines
// the GB_* specialization macros (GB_B_IS_BITMAP, GB_IS_ANY_PAIR_SEMIRING,
// the semiring multiply/add macros, etc.) and all local variables used here
// (ntasks, nthreads, bvdim, bvlen, cvlen, Ap, Ah, Ai, Ax, Bb, Cb, Cx, keep,
// cnvals, ...).  It is not a standalone translation unit.

{
    if (use_coarse_tasks)
    {

        //----------------------------------------------------------------------
        // C<#M> += A*B using coarse tasks
        //----------------------------------------------------------------------

        // number of columns in the workspace for each task
        #define GB_PANEL_SIZE 4

        if (B_iso)
        {
            // Gb and Gx workspace is allocated below.  TODO: only Gb workspace
            // is needed.  Use a single bx scalar for all threads, instead.
        }

        //----------------------------------------------------------------------
        // allocate workspace for each task
        //----------------------------------------------------------------------

        // GH_slice holds two ntasks-sized offset arrays: G_slice (offsets of
        // each task's panel-of-B workspace) and H_slice (offsets of each
        // task's panel-of-C workspace).
        GB_WERK_PUSH (GH_slice, 2*ntasks, int64_t) ;
        if (GH_slice == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        int64_t *restrict G_slice = GH_slice ;
        int64_t *restrict H_slice = GH_slice + ntasks ;

        int64_t gwork = 0 ;     // total panel columns needing G workspace
        int64_t hwork = 0 ;     // total panel columns needing H workspace
        int tid ;
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jstart, jend ;
            GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ;
            int64_t jtask = jend - jstart ;
            int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ;
            G_slice [tid] = gwork ;
            H_slice [tid] = hwork ;
            if (jpanel > 1 || B_iso)
            {
                // no need to allocate workspace for Gb and Gx if jpanel == 1
                gwork += jpanel ;
            }
            hwork += jpanel ;
        }

        // per-column byte sizes; zero when the corresponding values are not
        // needed (pattern-only B, ANY_PAIR semiring, or non-bitmap B)
        int64_t bvlenx = (B_is_pattern ? 0 : bvlen) * GB_BSIZE ;
        int64_t cvlenx = (GB_IS_ANY_PAIR_SEMIRING ? 0 : cvlen) * GB_CSIZE ;
        int64_t bvlenb = (GB_B_IS_BITMAP ? bvlen : 0) ;
        size_t gfspace = gwork * bvlenb ;
        size_t wfspace = gfspace + hwork * cvlen ;
        size_t wbxspace = gwork * bvlenx ;
        size_t wcxspace = hwork * cvlenx ;

        // Wf holds all Gb bitmaps followed by all Hf bitmaps; Wbx holds the
        // Gx values; Wcx holds the Hx values
        Wf  = GB_MALLOC_WERK (wfspace, int8_t, &Wf_size) ;
        Wbx = GB_MALLOC_WERK (wbxspace, GB_void, &Wbx_size) ;
        Wcx = GB_MALLOC_WERK (wcxspace, GB_void, &Wcx_size) ;
        if (Wf == NULL || Wcx == NULL || Wbx == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // C<#M> += A*B
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:cnvals)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // determine the vectors of B and C for this coarse task
            //------------------------------------------------------------------

            int64_t jstart, jend ;
            GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ;
            int64_t jtask = jend - jstart ;
            int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ;
            int64_t task_cnvals = 0 ;   // summed into cnvals via the reduction

            //------------------------------------------------------------------
            // get the workspace for this task
            //------------------------------------------------------------------

            // Gb and Gx workspace to load the panel of B
            // Hf and Hx workspace to compute the panel of C
            int8_t *restrict Gb = Wf + G_slice [tid] * bvlenb ;
            int8_t *restrict Hf = Wf + (H_slice [tid] * cvlen) + gfspace ;
            #if ( !GB_IS_ANY_PAIR_SEMIRING )
            GB_BTYPE *restrict Gx = (GB_BTYPE *)
                (Wbx + G_slice [tid] * bvlenx) ;
            GB_CTYPE *restrict Hx = (GB_CTYPE *)
                (Wcx + H_slice [tid] * cvlenx) ;
            #endif
            #if GB_IS_PLUS_FC32_MONOID
            // interleaved real/imag view of Hx for complex atomic-free updates
            float  *restrict Hx_real = (float *) Hx ;
            float  *restrict Hx_imag = Hx_real + 1 ;
            #elif GB_IS_PLUS_FC64_MONOID
            double *restrict Hx_real = (double *) Hx ;
            double *restrict Hx_imag = Hx_real + 1 ;
            #endif

            //------------------------------------------------------------------
            // clear the panel
            //------------------------------------------------------------------

            memset (Hf, 0, jpanel * cvlen) ;

            //------------------------------------------------------------------
            // C<#M>(:,jstart:jend-1) += A * B(:,jstart:jend-1) by panel
            //------------------------------------------------------------------

            for (int64_t j1 = jstart ; j1 < jend ; j1 += jpanel)
            {

                //--------------------------------------------------------------
                // get the panel of np vectors j1:j2-1
                //--------------------------------------------------------------

                int64_t j2 = GB_IMIN (jend, j1 + jpanel) ;
                int64_t np = j2 - j1 ;

                //--------------------------------------------------------------
                // load and transpose B(:,j1:j2-1) for one panel
                //--------------------------------------------------------------

                #if GB_B_IS_BITMAP
                {
                    if (np == 1)
                    {
                        // no need to load a single vector of B: alias Gb to
                        // the bitmap of B(:,j1) directly
                        Gb = (int8_t *) (Bb + (j1 * bvlen)) ;
                    }
                    else
                    {
                        // load and transpose the bitmap of B(:,j1:j2-1)
                        for (int64_t jj = 0 ; jj < np ; jj++)
                        {
                            int64_t j = j1 + jj ;
                            for (int64_t i = 0 ; i < bvlen ; i++)
                            {
                                Gb [i*np + jj] = Bb [i + j * bvlen] ;
                            }
                        }
                    }
                }
                #endif

                #if ( !GB_IS_ANY_PAIR_SEMIRING )
                if (!B_is_pattern)
                {
                    if (np == 1 && !B_iso)
                    {
                        // no need to load a single vector of B
                        GB_void *restrict Bx = (GB_void *) (B->x) ;
                        Gx = (GB_BTYPE *) (Bx + (j1 * bvlen) * GB_BSIZE) ;
                    }
                    else
                    {
                        // load and transpose the values of B(:,j1:j2-1)
                        for (int64_t jj = 0 ; jj < np ; jj++)
                        {
                            int64_t j = j1 + jj ;
                            for (int64_t i = 0 ; i < bvlen ; i++)
                            {
                                // G(i,jj) = B(i,j), and change storage order
                                int64_t pG = i*np + jj ;
                                int64_t pB = i + j * bvlen ;
                                GB_LOADB (Gx, pG, Bx, pB, B_iso) ;
                            }
                        }
                    }
                }
                #endif

                //--------------------------------------------------------------
                // H = A*G for one panel
                //--------------------------------------------------------------

                for (int64_t kA = 0 ; kA < anvec ; kA++)
                {

                    //----------------------------------------------------------
                    // get A(:,k)
                    //----------------------------------------------------------

                    int64_t k = GBH (Ah, kA) ;
                    int64_t pA = Ap [kA] ;
                    int64_t pA_end = Ap [kA+1] ;
                    int64_t pG = k * np ;

                    #undef  GB_MULT_A_ik_G_kjj
                    #if GB_IS_PAIR_MULTIPLIER
                    // t = A(i,k) * G (k,jj) is always equal to 1
                    #define GB_MULT_A_ik_G_kjj(jj)
                    #else
                    // t = A(i,k) * G (k,jj)
                    GB_CIJ_DECLARE (t) ;
                    #define GB_MULT_A_ik_G_kjj(jj)                          \
                        GB_GETB (gkj, Gx, pG+jj, false) ;                   \
                        GB_MULT (t, aik, gkj, i, k, j1 + jj) ;
                    #endif

                    #undef  GB_HX_COMPUTE
                    #define GB_HX_COMPUTE(jj)                               \
                    {                                                       \
                        /* H (i,jj) += A(i,k)*G(k,jj) */                    \
                        if (!GB_B_IS_BITMAP || Gb [pG+jj])                  \
                        {                                                   \
                            GB_MULT_A_ik_G_kjj (jj) ;                       \
                            if (Hf [pH+jj] == 0)                            \
                            {                                               \
                                /* H(i,jj) is a new entry */                \
                                GB_HX_WRITE (pH+jj, t) ; /* Hx(i,jj)=t */   \
                                Hf [pH+jj] = 1 ;                            \
                            }                                               \
                            else                                            \
                            {                                               \
                                /* H(i,jj) is already present */            \
                                GB_HX_UPDATE (pH+jj, t) ; /* Hx(i,jj)+=t */ \
                            }                                               \
                        }                                                   \
                    }

                    #undef  GB_LOAD_A_ij
                    #define GB_LOAD_A_ij                                    \
                        int64_t i = Ai [pA] ;                               \
                        GB_GETA (aik, Ax, pA, A_iso) ;                      \
                        int64_t pH = i * np ;

                    //----------------------------------------------------------
                    // H += A(:,k)*G(k,:)
                    //----------------------------------------------------------

                    #if GB_B_IS_BITMAP
                    // skip the whole column of A if all of G(k,0:np-1) is
                    // empty.  The missing break statements are intentional:
                    // each case ORs in one more entry of the panel bitmap.
                    bool gb = false ;
                    switch (np)
                    {
                        case 4 : gb  = Gb [pG+3] ;
                        case 3 : gb |= Gb [pG+2] ;
                        case 2 : gb |= Gb [pG+1] ;
                        case 1 : gb |= Gb [pG  ] ;
                        default: ;
                    }
                    if (gb)
                    #endif
                    {
                        // unrolled by panel width np (1 to GB_PANEL_SIZE)
                        switch (np)
                        {
                            case 4 :
                                for ( ; pA < pA_end ; pA++)
                                {
                                    GB_LOAD_A_ij ;
                                    GB_HX_COMPUTE (0) ;
                                    GB_HX_COMPUTE (1) ;
                                    GB_HX_COMPUTE (2) ;
                                    GB_HX_COMPUTE (3) ;
                                }
                                break ;
                            case 3 :
                                for ( ; pA < pA_end ; pA++)
                                {
                                    GB_LOAD_A_ij ;
                                    GB_HX_COMPUTE (0) ;
                                    GB_HX_COMPUTE (1) ;
                                    GB_HX_COMPUTE (2) ;
                                }
                                break ;
                            case 2 :
                                for ( ; pA < pA_end ; pA++)
                                {
                                    GB_LOAD_A_ij ;
                                    GB_HX_COMPUTE (0) ;
                                    GB_HX_COMPUTE (1) ;
                                }
                                break ;
                            case 1 :
                                for ( ; pA < pA_end ; pA++)
                                {
                                    GB_LOAD_A_ij ;
                                    GB_HX_COMPUTE (0) ;
                                }
                                break ;
                            default:;
                        }
                    }
                    #undef GB_MULT_A_ik_G_kjj
                    #undef GB_HX_COMPUTE
                    #undef GB_LOAD_A_ij
                }

                //--------------------------------------------------------------
                // C<#M>(:,j1:j2-1) += H
                //--------------------------------------------------------------

                for (int64_t jj = 0 ; jj < np ; jj++)
                {

                    //----------------------------------------------------------
                    // C<#M>(:,j) += H (:,jj)
                    //----------------------------------------------------------

                    int64_t j = j1 + jj ;
                    int64_t pC_start = j * cvlen ;  // get pointer to C(:,j)

                    for (int64_t i = 0 ; i < cvlen ; i++)
                    {
                        int64_t pC = pC_start + i ; // pointer to C(i,j)
                        int64_t pH = i * np + jj ;  // pointer to H(i,jj)
                        if (!Hf [pH]) continue ;
                        Hf [pH] = 0 ;               // clear the panel
                        int8_t cb = Cb [pC] ;

                        //------------------------------------------------------
                        // check M(i,j)
                        //------------------------------------------------------

                        #if GB_MASK_IS_SPARSE_OR_HYPER

                            // M is sparse or hypersparse (scattered into Cb:
                            // bit 1 of cb holds M(i,j))
                            bool mij = ((cb & 2) != 0) ^ Mask_comp ;
                            if (!mij) continue ;
                            cb = (cb & 1) ;

                        #elif GB_MASK_IS_BITMAP_OR_FULL

                            // M is bitmap or full
                            GB_GET_M_ij (pC) ;
                            mij = mij ^ Mask_comp ;
                            if (!mij) continue ;

                        #endif

                        //------------------------------------------------------
                        // C(i,j) += H(i,jj)
                        //------------------------------------------------------

                        if (cb == 0)
                        {
                            // C(i,j) = H(i,jj)
                            GB_CIJ_GATHER (pC, pH) ;
                            Cb [pC] = keep ;
                            task_cnvals++ ;
                        }
                        else
                        {
                            // Currently, the matrix C is a newly allocated
                            // matrix, not the C_in input matrix to GrB_mxm.
                            // As a result, this condition is not used.  It
                            // will be in the future when this method is
                            // modified to modify C in-place.
                            ASSERT (GB_DEAD_CODE) ;
                            // C(i,j) += H(i,jj)
                            GB_CIJ_GATHER_UPDATE (pC, pH) ;
                        }
                    }
                }
            }
            cnvals += task_cnvals ;
        }

        #undef GB_PANEL_SIZE

    }
    else if (use_atomics)
    {

        //----------------------------------------------------------------------
        // C<#M> += A*B using fine tasks and atomics
        //----------------------------------------------------------------------

        if (B_iso)
        {
            // No special cases needed.  GB_GET_B_kj (bkj = B(k,j))
            // handles the B iso case.
        }

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:cnvals)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // determine the vector of B and C for this fine task
            //------------------------------------------------------------------

            // The fine task operates on C(:,j) and B(:,j).  Its fine task
            // id ranges from 0 to nfine_tasks_per_vector-1, and determines
            // which slice of A to operate on.

            int64_t j = tid / nfine_tasks_per_vector ;
            int fine_tid = tid % nfine_tasks_per_vector ;
            int64_t kfirst = A_slice [fine_tid] ;
            int64_t klast = A_slice [fine_tid + 1] ;
            int64_t pB_start = j * bvlen ;      // pointer to B(:,j)
            int64_t pC_start = j * cvlen ;      // pointer to C(:,j)
            GB_GET_T_FOR_SECONDJ ;              // t = j or j+1 for SECONDJ*
            int64_t task_cnvals = 0 ;

            // for Hx Gustavason workspace: use C(:,j) in-place:
            #if ( !GB_IS_ANY_PAIR_SEMIRING )
            GB_CTYPE *restrict Hx = (GB_CTYPE *)
                (((GB_void *) Cx) + (pC_start * GB_CSIZE)) ;
            #endif
            #if GB_IS_PLUS_FC32_MONOID || GB_IS_ANY_FC32_MONOID
            float  *restrict Hx_real = (float *) Hx ;
            float  *restrict Hx_imag = Hx_real + 1 ;
            #elif GB_IS_PLUS_FC64_MONOID || GB_IS_ANY_FC64_MONOID
            double *restrict Hx_real = (double *) Hx ;
            double *restrict Hx_imag = Hx_real + 1 ;
            #endif

            //------------------------------------------------------------------
            // C<#M>(:,j) += A(:,k1:k2) * B(k1:k2,j)
            //------------------------------------------------------------------

            for (int64_t kk = kfirst ; kk < klast ; kk++)
            {

                //--------------------------------------------------------------
                // C<#M>(:,j) += A(:,k) * B(k,j)
                //--------------------------------------------------------------

                int64_t k = GBH (Ah, kk) ;      // k in range k1:k2
                int64_t pB = pB_start + k ;     // get pointer to B(k,j)
                if (!GBB (Bb, pB)) continue ;
                int64_t pA = Ap [kk] ;
                int64_t pA_end = Ap [kk+1] ;
                GB_GET_B_kj ;                   // bkj = B(k,j)

                for ( ; pA < pA_end ; pA++)
                {

                    //----------------------------------------------------------
                    // get A(i,k) and C(i,j)
                    //----------------------------------------------------------

                    int64_t i = Ai [pA] ;       // get A(i,k) index
                    int64_t pC = pC_start + i ; // get C(i,j) pointer
                    int8_t cb ;

                    //----------------------------------------------------------
                    // C<#M>(i,j) += A(i,k) * B(k,j)
                    //----------------------------------------------------------

                    #if GB_MASK_IS_SPARSE_OR_HYPER
                    {

                        //------------------------------------------------------
                        // M is sparse, and scattered into the C bitmap
                        //------------------------------------------------------

                        // finite-state machine in Cb [pC]:
                        // 0: cij not present, mij zero
                        // 1: cij present, mij zero (keep==1 for !M)
                        // 2: cij not present, mij one
                        // 3: cij present, mij one (keep==3 for M)
                        // 7: cij is locked

                        #if GB_HAS_ATOMIC
                        {
                            // if C(i,j) is already present and can be modified
                            // (cb==keep), and the monoid can be done
                            // atomically, then do the atomic update.  No need
                            // to modify Cb [pC].
                            GB_ATOMIC_READ
                            cb = Cb [pC] ;          // grab the entry
                            if (cb == keep)
                            {
                                #if !GB_IS_ANY_MONOID
                                GB_MULT_A_ik_B_kj ;     // t = A(i,k) * B(k,j)
                                GB_ATOMIC_UPDATE_HX (i, t) ;    // C(i,j) += t
                                #endif
                                continue ;      // C(i,j) has been updated
                            }
                        }
                        #endif

                        // int8 CAS spin-lock on the bitmap entry; 7 marks the
                        // entry as locked by this thread
                        do  // lock the entry
                        {
                            // do this atomically:
                            // { cb = Cb [pC] ; Cb [pC] = 7 ; }
                            GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ;
                        } while (cb == 7) ; // lock owner gets 0, 1, 2, or 3

                        if (cb == keep-1)
                        {
                            // C(i,j) is a new entry
                            GB_MULT_A_ik_B_kj ;         // t = A(i,k)*B(k,j)
                            GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t
                            task_cnvals++ ;
                            cb = keep ;                 // keep the entry
                        }
                        else if (cb == keep)
                        {
                            // C(i,j) is already present
                            #if !GB_IS_ANY_MONOID
                            GB_MULT_A_ik_B_kj ;         // t = A(i,k)*B(k,j)
                            GB_ATOMIC_UPDATE_HX (i, t) ;    // C(i,j) += t
                            #endif
                        }
                        GB_ATOMIC_WRITE
                        Cb [pC] = cb ;              // unlock the entry

                    }
                    #else
                    {

                        //------------------------------------------------------
                        // M is not present, or bitmap/full
                        //------------------------------------------------------

                        // finite-state machine in Cb [pC]:
                        // 0: cij not present; can be written
                        // 1: cij present; can be updated
                        // 7: cij is locked

                        #if GB_MASK_IS_BITMAP_OR_FULL
                        {
                            // M is bitmap or full, and not in C bitmap.
                            // Do not modify C(i,j) if not permitted by the mask
                            GB_GET_M_ij (pC) ;
                            mij = mij ^ Mask_comp ;
                            if (!mij) continue ;
                        }
                        #endif

                        //------------------------------------------------------
                        // C(i,j) += A(i,j) * B(k,j)
                        //------------------------------------------------------

                        #if GB_HAS_ATOMIC
                        {
                            // if C(i,j) is already present (cb==1), and the
                            // monoid can be done atomically, then do the
                            // atomic update.  No need to modify Cb [pC].
                            GB_ATOMIC_READ
                            cb = Cb [pC] ;          // grab the entry
                            if (cb == 1)
                            {
                                #if !GB_IS_ANY_MONOID
                                GB_MULT_A_ik_B_kj ;     // t = A(i,k) * B(k,j)
                                GB_ATOMIC_UPDATE_HX (i, t) ;    // C(i,j) += t
                                #endif
                                continue ;      // C(i,j) has been updated
                            }
                        }
                        #endif

                        do  // lock the entry
                        {
                            // do this atomically:
                            // { cb = Cb [pC] ; Cb [pC] = 7 ; }
                            GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ;
                        } while (cb == 7) ; // lock owner gets 0 or 1

                        if (cb == 0)
                        {
                            // C(i,j) is a new entry
                            GB_MULT_A_ik_B_kj ;         // t = A(i,k)*B(k,j)
                            GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t
                            task_cnvals++ ;
                        }
                        else // cb == 1
                        {
                            // C(i,j) is already present
                            #if !GB_IS_ANY_MONOID
                            GB_MULT_A_ik_B_kj ;         // t = A(i,k)*B(k,j)
                            GB_ATOMIC_UPDATE_HX (i, t) ;    // C(i,j) += t
                            #endif
                        }
                        GB_ATOMIC_WRITE
                        Cb [pC] = 1 ;               // unlock the entry

                    }
                    #endif
                }
            }
            cnvals += task_cnvals ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // C<#M> += A*B using fine tasks and workspace, with no atomics
        //----------------------------------------------------------------------

        // Each fine task is given size-cvlen workspace to compute its result
        // in the first phase, W(:,tid) = A(:,k1:k2) * B(k1:k2,j), where k1:k2
        // is defined by the fine_tid of the task.  The workspaces are then
        // summed into C in the second phase.

        if (B_iso)
        {
            // No special cases needed.  GB_GET_B_kj (bkj = B(k,j))
            // handles the B iso case.
        }

        //----------------------------------------------------------------------
        // allocate workspace
        //----------------------------------------------------------------------

        size_t workspace = cvlen * ntasks ;
        size_t cxsize = (GB_IS_ANY_PAIR_SEMIRING) ? 0 : GB_CSIZE ;
        Wf  = GB_MALLOC_WERK (workspace, int8_t, &Wf_size) ;
        Wcx = GB_MALLOC_WERK (workspace * cxsize, GB_void, &Wcx_size) ;
        if (Wf == NULL || Wcx == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // first phase: W (:,tid) = A (:,k1:k2) * B (k2:k2,j) for each fine task
        //----------------------------------------------------------------------

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // determine the vector of B and C for this fine task
            //------------------------------------------------------------------

            // The fine task operates on C(:,j) and B(:,j).  Its fine task
            // id ranges from 0 to nfine_tasks_per_vector-1, and determines
            // which slice of A to operate on.

            int64_t j = tid / nfine_tasks_per_vector ;
            int fine_tid = tid % nfine_tasks_per_vector ;
            int64_t kfirst = A_slice [fine_tid] ;
            int64_t klast = A_slice [fine_tid + 1] ;
            int64_t pB_start = j * bvlen ;      // pointer to B(:,j)
            int64_t pC_start = j * cvlen ;      // pointer to C(:,j), for bitmap
            int64_t pW_start = tid * cvlen ;    // pointer to W(:,tid)
            GB_GET_T_FOR_SECONDJ ;              // t = j or j+1 for SECONDJ*
            int64_t task_cnvals = 0 ;

            // for Hf and Hx Gustavason workspace: use W(:,tid):
            int8_t *restrict Hf = Wf + pW_start ;
            #if ( !GB_IS_ANY_PAIR_SEMIRING )
            GB_CTYPE *restrict Hx = (GB_CTYPE *) (Wcx + (pW_start * cxsize)) ;
            #endif
            #if GB_IS_PLUS_FC32_MONOID
            float  *restrict Hx_real = (float *) Hx ;
            float  *restrict Hx_imag = Hx_real + 1 ;
            #elif GB_IS_PLUS_FC64_MONOID
            double *restrict Hx_real = (double *) Hx ;
            double *restrict Hx_imag = Hx_real + 1 ;
            #endif

            //------------------------------------------------------------------
            // clear Hf
            //------------------------------------------------------------------

            memset (Hf, 0, cvlen) ;

            //------------------------------------------------------------------
            // W<#M> = A(:,k1:k2) * B(k1:k2,j)
            //------------------------------------------------------------------

            for (int64_t kk = kfirst ; kk < klast ; kk++)
            {

                //--------------------------------------------------------------
                // W<#M>(:,tid) += A(:,k) * B(k,j)
                //--------------------------------------------------------------

                int64_t k = GBH (Ah, kk) ;      // k in range k1:k2
                int64_t pB = pB_start + k ;     // get pointer to B(k,j)
                if (!GBB (Bb, pB)) continue ;
                int64_t pA = Ap [kk] ;
                int64_t pA_end = Ap [kk+1] ;
                GB_GET_B_kj ;                   // bkj = B(k,j)

                for ( ; pA < pA_end ; pA++)
                {

                    //----------------------------------------------------------
                    // get A(i,k)
                    //----------------------------------------------------------

                    int64_t i = Ai [pA] ;       // get A(i,k) index

                    //----------------------------------------------------------
                    // check M(i,j)
                    //----------------------------------------------------------

                    #if GB_MASK_IS_SPARSE_OR_HYPER
                    {
                        // M is sparse or hypersparse
                        int64_t pC = pC_start + i ;
                        int8_t cb = Cb [pC] ;
                        bool mij = ((cb & 2) != 0) ^ Mask_comp ;
                        if (!mij) continue ;
                    }
                    #elif GB_MASK_IS_BITMAP_OR_FULL
                    {
                        // M is bitmap or full
                        int64_t pC = pC_start + i ;
                        GB_GET_M_ij (pC) ;
                        mij = mij ^ Mask_comp ;
                        if (!mij) continue ;
                    }
                    #endif

                    //----------------------------------------------------------
                    // W<#M>(i) += A(i,k) * B(k,j)
                    //----------------------------------------------------------

                    #if GB_IS_ANY_PAIR_SEMIRING
                    {
                        // values are all 1; only the pattern matters
                        Hf [i] = 1 ;
                    }
                    #else
                    {
                        GB_MULT_A_ik_B_kj ;         // t = A(i,k)*B(k,j)
                        if (Hf [i] == 0)
                        {
                            // W(i,j) is a new entry
                            GB_HX_WRITE (i, t) ;    // Hx(i) = t
                            Hf [i] = 1 ;
                        }
                        else
                        {
                            // W(i) is already present
                            GB_HX_UPDATE (i, t) ;   // Hx(i) += t
                        }
                    }
                    #endif
                }
            }
        }

        //----------------------------------------------------------------------
        // second phase: C<#M> += reduce (W)
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:cnvals)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // determine the W and C for this fine task
            //------------------------------------------------------------------

            // The fine task operates on C(i1:i2,j) and W(i1:i2,w1:w2), where
            // i1:i2 is defined by the fine task id.  Its fine task id ranges
            // from 0 to nfine_tasks_per_vector-1.
            // w1:w2 are the updates to C(:,j), where w1:w2 =
            // [j*nfine_tasks_per_vector : (j+1)*nfine_tasks_per_vector-1].

            int64_t j = tid / nfine_tasks_per_vector ;
            int fine_tid = tid % nfine_tasks_per_vector ;
            int64_t istart, iend ;
            GB_PARTITION (istart, iend, cvlen, fine_tid,
                nfine_tasks_per_vector) ;
            int64_t pC_start = j * cvlen ;          // pointer to C(:,j)
            int64_t wstart = j * nfine_tasks_per_vector ;
            int64_t wend = (j + 1) * nfine_tasks_per_vector ;
            int64_t task_cnvals = 0 ;

            // Hx = (typecasted) Wcx workspace, use Wf as-is
            #if ( !GB_IS_ANY_PAIR_SEMIRING )
            GB_CTYPE *restrict Hx = ((GB_CTYPE *) Wcx) ;
            #endif
            #if GB_IS_PLUS_FC32_MONOID
            float  *restrict Hx_real = (float *) Hx ;
            float  *restrict Hx_imag = Hx_real + 1 ;
            #elif GB_IS_PLUS_FC64_MONOID
            double *restrict Hx_real = (double *) Hx ;
            double *restrict Hx_imag = Hx_real + 1 ;
            #endif

            //------------------------------------------------------------------
            // C<#M>(i1:i2,j) += reduce (W (i2:i2, wstart:wend))
            //------------------------------------------------------------------

            for (int64_t w = wstart ; w < wend ; w++)
            {

                //--------------------------------------------------------------
                // C<#M>(i1:i2,j) += W (i1:i2,w)
                //--------------------------------------------------------------

                int64_t pW_start = w * cvlen ;      // pointer to W (:,w)

                for (int64_t i = istart ; i < iend ; i++)
                {

                    //----------------------------------------------------------
                    // get pointer and bitmap C(i,j) and W(i,w)
                    //----------------------------------------------------------

                    int64_t pW = pW_start + i ;     // pointer to W(i,w)
                    if (Wf [pW] == 0) continue ;    // skip if not present
                    int64_t pC = pC_start + i ;     // pointer to C(i,j)
                    int8_t cb = Cb [pC] ;           // bitmap status of C(i,j)

                    //----------------------------------------------------------
                    // M(i,j) already checked, but adjust Cb if M is sparse
                    //----------------------------------------------------------

                    #if GB_MASK_IS_SPARSE_OR_HYPER
                    {
                        // M is sparse or hypersparse
                        cb = (cb & 1) ;
                    }
                    #endif

                    //----------------------------------------------------------
                    // C(i,j) += W (i,w)
                    //----------------------------------------------------------

                    if (cb == 0)
                    {
                        // C(i,j) = W(i,w)
                        GB_CIJ_GATHER (pC, pW) ;
                        Cb [pC] = keep ;
                        task_cnvals++ ;
                    }
                    else
                    {
                        // C(i,j) += W(i,w)
                        GB_CIJ_GATHER_UPDATE (pC, pW) ;
                    }
                }
            }
            cnvals += task_cnvals ;
        }
    }
}
Cycle.c
/*
 * The MIT License
 *
 * Copyright 2020 The OpenNARS authors.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "Cycle.h"

static long conceptProcessID = 0; //avoids duplicate concept processing

//Iterate all concepts related to TERM, binding each to CONCEPT and running BODY on it.
//Related concepts are: the concept for TERM itself (if any), plus the concepts indexed
//under TERM's first UNIFICATION_DEPTH atoms in the inverted atom index.  The global
//conceptProcessID stamp guarantees each concept is visited at most once per increment
//of conceptProcessID (callers bump conceptProcessID before using this macro).
#define RELATED_CONCEPTS_FOREACH(TERM, CONCEPT, BODY) \
    for(int _i_=0; _i_<UNIFICATION_DEPTH; _i_++) \
    { \
        ConceptChainElement chain_extended = { .c = Memory_FindConceptByTerm(TERM), .next = InvertedAtomIndex_GetConceptChain((TERM)->atoms[_i_]) }; \
        ConceptChainElement* chain = &chain_extended; \
        while(chain != NULL) \
        { \
            Concept *CONCEPT = chain->c; \
            chain = chain->next; \
            if(CONCEPT != NULL && CONCEPT->processID != conceptProcessID) \
            { \
                CONCEPT->processID = conceptProcessID; \
                BODY \
            } \
        } \
    }

//doing inference within the matched concept, returning whether decisionMaking should continue
//c           - the concept the event was matched to (mutated: usage and belief_spike)
//e           - the matched event; only processed if its confidence exceeds MIN_CONFIDENCE
//currentTime - current system time, used for usage bookkeeping and decision suggestion
//returns a zero-initialized Decision when nothing is suggested (belief events never
//produce a decision; they are stored as the concept's belief spike instead)
static Decision Cycle_ActivateSensorimotorConcept(Concept *c, Event *e, long currentTime)
{
    Decision decision = {0};
    if(e->truth.confidence > MIN_CONFIDENCE)
    {
        c->usage = Usage_use(c->usage, currentTime, false);
        //add event as spike to the concept:
        if(e->type == EVENT_TYPE_BELIEF)
        {
            c->belief_spike = *e;
        }
        else
        {
            //pass spike if the concept doesn't have a satisfying motor command
            decision = Decision_Suggest(c, e, currentTime);
        }
    }
    return decision;
}

//Process an event, by creating a concept, or activating an existing
//Conceptualizes e's term, marks e processed, then activates every related concept
//whose term unifies with e's term (direction of unification depends on which side
//carries variables).  Among all suggested decisions, the one with the highest
//desire is returned, with specialized decisions preferred over unspecialized ones.
static Decision Cycle_ProcessSensorimotorEvent(Event *e, long currentTime)
{
    Decision best_decision = {0};
    //add a new concept for e if not yet existing
    Memory_Conceptualize(&e->term, currentTime);
    e->processed = true;
    //determine the concept it is related to
    bool e_hasVariable = Variable_hasVariable(&e->term, true, true, true);
    conceptProcessID++; //process the to e related concepts
    RELATED_CONCEPTS_FOREACH(&e->term, c,
    {
        Event ecp = *e;
        if(!e_hasVariable)  //concept matched to the event which doesn't have variables
        {
            Substitution subs = Variable_Unify(&c->term, &e->term); //concept with variables,
            if(subs.success)
            {
                ecp.term = e->term;
                Decision decision = Cycle_ActivateSensorimotorConcept(c, &ecp, currentTime);
                if(decision.execute && decision.desire >= best_decision.desire && (!best_decision.specialized || decision.specialized))
                {
                    best_decision = decision;
                }
            }
        }
        else
        {
            Substitution subs = Variable_Unify(&e->term, &c->term); //event with variable matched to concept
            if(subs.success)
            {
                bool success;
                ecp.term = Variable_ApplySubstitute(e->term, subs, &success);
                if(success)
                {
                    Decision decision = Cycle_ActivateSensorimotorConcept(c, &ecp, currentTime);
                    if(decision.execute && decision.desire >= best_decision.desire && (!best_decision.specialized || decision.specialized))
                    {
                        best_decision = decision;
                    }
                }
            }
        }
    })
    return best_decision;
}

//Pop up to cnt highest-priority events from queue into selectionArray (copies, not
//pointers) with their priorities in selectionPriority; *selectedCnt receives the
//number actually popped (less than cnt only when the queue runs empty).
//NOTE(review): assert here is the project's two-argument assert, not <assert.h>.
void Cycle_PopEvents(Event *selectionArray, double *selectionPriority, int *selectedCnt, PriorityQueue *queue, int cnt)
{
    *selectedCnt = 0;
    for(int i=0; i<cnt; i++)
    {
        Event *e;
        double priority = 0;
        if(!PriorityQueue_PopMax(queue, (void**) &e, &priority))
        {
            assert(queue->itemsAmount == 0, "No item was popped, only acceptable reason is when it's empty");
            IN_DEBUG( puts("Selecting event failed, maybe there is no event left."); )
            break;
        }
        selectionPriority[*selectedCnt] = priority;
        selectionArray[*selectedCnt] = *e; //needs to be copied because will be added in a batch
        (*selectedCnt)++; //that while processing, would make recycled pointers invalid to use
    }
}

//Derive a subgoal from a sequence goal
//{Event (a &/ b)!, Event a.} |- Event b! Truth_Deduction
//if Truth_Expectation(a) >= ANTICIPATION_THRESHOLD else
//{Event (a &/ b)!} |- Event a! Truth_StructuralDeduction
bool Cycle_GoalSequenceDecomposition(Event *selectedGoal, double selectedGoalPriority)
{
    //1.
Extract potential subgoals if(!Narsese_copulaEquals(selectedGoal->term.atoms[0], '+')) //left-nested sequence { return false; } Term componentGoalsTerm[MAX_SEQUENCE_LEN+1] = {0}; Term cur_seq = selectedGoal->term; int i=0; for(; Narsese_copulaEquals(cur_seq.atoms[0], '+'); i++) { assert(i<=MAX_SEQUENCE_LEN, "The sequence was longer than MAX_SEQUENCE_LEN, change your input or increase the parameter!"); componentGoalsTerm[i] = Term_ExtractSubterm(&cur_seq, 2); cur_seq = Term_ExtractSubterm(&cur_seq, 1); } componentGoalsTerm[i] = cur_seq; //the last element at this point //2. Find first subgoal which isn't fulfilled int lastComponentOccurrenceTime = -1; Event newGoal = Inference_EventUpdate(selectedGoal, currentTime); int j=i; for(; j>=0; j--) { Term *componentGoal = &componentGoalsTerm[j]; Substitution best_subs = {0}; Concept *best_c = NULL; double best_exp = 0.0; //the concept with belief event of highest truth exp conceptProcessID++; RELATED_CONCEPTS_FOREACH(componentGoal, c, { if(!Variable_hasVariable(&c->term, true, true, true)) //concept matched to the event which doesn't have variables { Substitution subs = Variable_Unify(componentGoal, &c->term); //event with variable matched to concept if(subs.success) { bool success = true; if(c->belief_spike.type != EVENT_TYPE_DELETED) { //check whether the temporal order is violated if(c->belief_spike.occurrenceTime < lastComponentOccurrenceTime) { continue; } //check whether belief is too weak (not recent enough or not true enough) if(Truth_Expectation(Truth_Projection(c->belief_spike.truth, c->belief_spike.occurrenceTime, currentTime)) < CONDITION_THRESHOLD) { continue; } //check whether the substitution works for the subgoals coming after it for(int u=j-1; u>=0; u--) { bool goalsubs_success; Variable_ApplySubstitute(componentGoalsTerm[u], subs, &goalsubs_success); if(!goalsubs_success) { success = false; break; } } //Use this specific concept for subgoaling if it has the strongest belief event if(success) { double 
expectation = Truth_Expectation(Truth_Projection(c->belief_spike.truth, c->belief_spike.occurrenceTime, currentTime)); if(expectation > best_exp) { best_exp = expectation; best_c = c; best_subs = subs; } } } } } //no need to search another concept, as it didn't have a var so the concept we just iterated is the only one if(!Variable_hasVariable(componentGoal, true, true, true)) { goto DONE_CONCEPT_ITERATING; } }) DONE_CONCEPT_ITERATING: //no corresponding belief if(best_c == NULL) { break; } //all components fulfilled? Then nothing to do if(j == 0) { return true; } //Apply substitution implied by the event satisfying the current subgoal to the next subgoals for(int u=j-1; u>=0; u--) { bool goalsubs_success; componentGoalsTerm[u] = Variable_ApplySubstitute(componentGoalsTerm[u], best_subs, &goalsubs_success); assert(goalsubs_success, "Cycle_GoalSequenceDecomposition: The subsitution succeeded before but not now!"); } //build component subgoal according to {(a, b)!, a} |- b! Truth_Deduction lastComponentOccurrenceTime = best_c->belief_spike.occurrenceTime; newGoal = Inference_GoalSequenceDeduction(&newGoal, &best_c->belief_spike, currentTime); newGoal.term = componentGoalsTerm[j-1]; } if(j == i) //we derive first component according to {(a,b)!} |- a! 
Truth_StructuralDeduction { newGoal.term = componentGoalsTerm[i]; newGoal.truth = Truth_StructuralDeduction(newGoal.truth, newGoal.truth); } Memory_AddEvent(&newGoal, currentTime, selectedGoalPriority * Truth_Expectation(newGoal.truth), 0, false, true, false, false, false); return true; } //Propagate subgoals, leading to decisions static void Cycle_ProcessInputGoalEvents(long currentTime) { Decision best_decision = {0}; //process selected goals for(int i=0; i<goalsSelectedCnt; i++) { Event *goal = &selectedGoals[i]; IN_DEBUG( fputs("selected goal ", stdout); Narsese_PrintTerm(&goal->term); puts(""); ) //if goal is a sequence, overwrite with first deduced non-fulfilled element if(Cycle_GoalSequenceDecomposition(goal, selectedGoalsPriority[i])) //the goal was a sequence which leaded to a subgoal derivation { continue; } Decision decision = Cycle_ProcessSensorimotorEvent(goal, currentTime); if(decision.execute && decision.desire > best_decision.desire && (!best_decision.specialized || decision.specialized)) { best_decision = decision; } } if(best_decision.execute && best_decision.operationID > 0) { //reset cycling goal events after execution to avoid "residue actions" PriorityQueue_INIT(&cycling_goal_events, cycling_goal_events.items, cycling_goal_events.maxElements); //also don't re-add the selected goal: goalsSelectedCnt = 0; //execute decision Decision_Execute(&best_decision); } //pass goal spikes on to the next for(int i=0; i<goalsSelectedCnt && !best_decision.execute; i++) { Event *goal = &selectedGoals[i]; conceptProcessID++; //process subgoaling for the related concepts for each selected goal RELATED_CONCEPTS_FOREACH(&goal->term, c, { if(Variable_Unify(&c->term, &goal->term).success) { bool revised; c->goal_spike = Inference_RevisionAndChoice(&c->goal_spike, goal, currentTime, &revised); for(int opi=NOP_SUBGOALING ? 
0 : 1; opi<=OPERATIONS_MAX; opi++) { for(int j=0; j<c->precondition_beliefs[opi].itemsAmount; j++) { Implication *imp = &c->precondition_beliefs[opi].array[j]; if(!Memory_ImplicationValid(imp)) { Table_Remove(&c->precondition_beliefs[opi], j); j--; continue; } Term postcondition = Term_ExtractSubterm(&imp->term, 2); Substitution subs = Variable_Unify(&postcondition, &c->goal_spike.term); Implication updated_imp = *imp; bool success; updated_imp.term = Variable_ApplySubstitute(updated_imp.term, subs, &success); if(success) { Event newGoal = Inference_GoalDeduction(&c->goal_spike, &updated_imp, currentTime); Event newGoalUpdated = Inference_EventUpdate(&newGoal, currentTime); IN_DEBUG( fputs("derived goal ", stdout); Narsese_PrintTerm(&newGoalUpdated.term); puts(""); ) Memory_AddEvent(&newGoalUpdated, currentTime, selectedGoalsPriority[i] * Truth_Expectation(newGoalUpdated.truth), 0, false, true, false, false, false); } } } } }) } } //Reinforce link between concept a and b (creating it if non-existent) static void Cycle_ReinforceLink(Event *a, Event *b) { if(a->type != EVENT_TYPE_BELIEF || b->type != EVENT_TYPE_BELIEF) { return; } Term a_term_nop = Narsese_GetPreconditionWithoutOp(&a->term); Concept *A = Memory_FindConceptByTerm(&a_term_nop); Concept *B = Memory_FindConceptByTerm(&b->term); if(A != NULL && B != NULL && A != B) { //temporal induction if(!Stamp_checkOverlap(&a->stamp, &b->stamp)) { bool success; Implication precondition_implication = Inference_BeliefInduction(a, b, &success); if(success) { precondition_implication.sourceConcept = A; precondition_implication.sourceConceptId = A->id; if(precondition_implication.truth.confidence >= MIN_CONFIDENCE) { //extensional var intro: bool success; Term general_implication_term_ext = IntroduceImplicationVariables(precondition_implication.term, &success, true); if(success && Variable_hasVariable(&general_implication_term_ext, true, true, false)) { NAL_DerivedEvent(general_implication_term_ext, OCCURRENCE_ETERNAL, 
precondition_implication.truth, precondition_implication.stamp, currentTime, 1, 1, precondition_implication.occurrenceTimeOffset, NULL, 0); } //intensional var intro: bool success2; Term general_implication_term_int = IntroduceImplicationVariables(precondition_implication.term, &success2, false); if(success2 && Variable_hasVariable(&general_implication_term_int, true, true, false)) { NAL_DerivedEvent(general_implication_term_int, OCCURRENCE_ETERNAL, precondition_implication.truth, precondition_implication.stamp, currentTime, 1, 1, precondition_implication.occurrenceTimeOffset, NULL, 0); } //specific implication NAL_DerivedEvent(precondition_implication.term, OCCURRENCE_ETERNAL, precondition_implication.truth, precondition_implication.stamp, currentTime, 1, 1, precondition_implication.occurrenceTimeOffset, NULL, 0); } } } } } void Cycle_PushEvents(long currentTime) { for(int i=0; i<beliefsSelectedCnt; i++) { Memory_AddEvent(&selectedBeliefs[i], currentTime, selectedBeliefsPriority[i], 0, false, false, true, false, false); } for(int i=0; i<goalsSelectedCnt; i++) { Memory_AddEvent(&selectedGoals[i], currentTime, selectedGoalsPriority[i], 0, false, false, true, false, false); } } void Cycle_ProcessInputBeliefEvents(long currentTime) { //1. 
process newest event if(belief_events.itemsAmount > 0) { //form concepts for the sequences of different length for(int state=(1 << MAX_SEQUENCE_LEN)-1; state>=1; state--) { Event *toProcess = FIFO_GetNewestSequence(&belief_events, state); if(toProcess != NULL && !toProcess->processed && toProcess->type != EVENT_TYPE_DELETED) { assert(toProcess->type == EVENT_TYPE_BELIEF, "A different event type made it into belief events!"); Cycle_ProcessSensorimotorEvent(toProcess, currentTime); Event postcondition = *toProcess; //Mine for <(&/,precondition,operation) =/> postcondition> patterns in the FIFO: if(state == 1) //postcondition always len1 { int op_id = Narsese_getOperationID(&postcondition.term); Decision_Anticipate(op_id, currentTime); //collection of negative evidence, new way for(int k=1; k<belief_events.itemsAmount; k++) { for(int state2=1; state2<(1 << MAX_SEQUENCE_LEN); state2++) { Event *precondition = FIFO_GetKthNewestSequence(&belief_events, k, state2); if(precondition != NULL && precondition->type != EVENT_TYPE_DELETED) { if(state2 > 1) { int substate = state2; int shift = 0; while(substate) { substate = (substate >> 1); shift++; if(substate & 1) { if(k+shift < FIFO_SIZE) { Event *potential_op = FIFO_GetKthNewestSequence(&belief_events, k+shift, 1); if(potential_op != NULL && potential_op->type != EVENT_TYPE_DELETED && Narsese_isOperation(&potential_op->term)) { goto CONTINUE; } } } } } Cycle_ReinforceLink(precondition, &postcondition); CONTINUE:; } } } } } } } } void Cycle_Inference(long currentTime) { //Inferences #if STAGE==2 for(int i=0; i<beliefsSelectedCnt; i++) { conceptProcessID++; //process the related belief concepts long countConceptsMatched = 0; for(;;) { long countConceptsMatchedNew = 0; //Adjust dynamic firing threshold: (proportional "self"-control) double conceptPriorityThresholdCurrent = conceptPriorityThreshold; long countConceptsMatchedAverage = Stats_countConceptsMatchedTotal / currentTime; double set_point = BELIEF_CONCEPT_MATCH_TARGET; 
double process_value = countConceptsMatchedAverage; double error = process_value - set_point; double increment = error*CONCEPT_THRESHOLD_ADAPTATION; conceptPriorityThreshold = MIN(1.0, MAX(0.0, conceptPriorityThreshold + increment)); //IN_DEBUG( printf("conceptPriorityThreshold=%f\n", conceptPriorityThreshold); ) Event *e = &selectedBeliefs[i]; double priority = selectedBeliefsPriority[i]; Term dummy_term = {0}; Truth dummy_truth = {0}; RuleTable_Apply(e->term, dummy_term, e->truth, dummy_truth, e->occurrenceTime, 0, e->stamp, currentTime, priority, 1, false, NULL, 0); RELATED_CONCEPTS_FOREACH(&e->term, c, { long validation_cid = c->id; //allows for lockfree rule table application (only adding to memory is locked) if(c->priority < conceptPriorityThresholdCurrent) { continue; } countConceptsMatchedNew++; countConceptsMatched++; Stats_countConceptsMatchedTotal++; if(c->belief.type != EVENT_TYPE_DELETED && countConceptsMatched <= BELIEF_CONCEPT_MATCH_TARGET) { //use eternal belief as belief Event* belief = &c->belief; Event future_belief = c->predicted_belief; //but if there is a predicted one in the event's window, use this one if(e->occurrenceTime != OCCURRENCE_ETERNAL && future_belief.type != EVENT_TYPE_DELETED && labs(e->occurrenceTime - future_belief.occurrenceTime) < EVENT_BELIEF_DISTANCE) //take event as belief if it's stronger { future_belief.truth = Truth_Projection(future_belief.truth, future_belief.occurrenceTime, e->occurrenceTime); future_belief.occurrenceTime = e->occurrenceTime; belief = &future_belief; } //unless there is an actual belief which falls into the event's window Event project_belief = c->belief_spike; if(e->occurrenceTime != OCCURRENCE_ETERNAL && project_belief.type != EVENT_TYPE_DELETED && labs(e->occurrenceTime - project_belief.occurrenceTime) < EVENT_BELIEF_DISTANCE) //take event as belief if it's stronger { project_belief.truth = Truth_Projection(project_belief.truth, project_belief.occurrenceTime, e->occurrenceTime); 
project_belief.occurrenceTime = e->occurrenceTime; belief = &project_belief; } //Check for overlap and apply inference rules if(!Stamp_checkOverlap(&e->stamp, &belief->stamp)) { c->usage = Usage_use(c->usage, currentTime, false); Stamp stamp = Stamp_make(&e->stamp, &belief->stamp); if(PRINT_CONTROL_INFO) { fputs("Apply rule table on ", stdout); Narsese_PrintTerm(&e->term); printf(" Priority=%f\n", priority); fputs(" and ", stdout); Narsese_PrintTerm(&c->term); puts(""); } long occurrenceTimeDistance = 0; if(belief->occurrenceTime != OCCURRENCE_ETERNAL && e->occurrenceTime != OCCURRENCE_ETERNAL) { occurrenceTimeDistance = labs(belief->occurrenceTime - e->occurrenceTime); } RuleTable_Apply(e->term, c->term, e->truth, belief->truth, e->occurrenceTime, occurrenceTimeDistance, stamp, currentTime, priority, c->priority, true, c, validation_cid); } } }) if(countConceptsMatched > Stats_countConceptsMatchedMax) { Stats_countConceptsMatchedMax = countConceptsMatched; } if(countConceptsMatched >= BELIEF_CONCEPT_MATCH_TARGET || countConceptsMatchedNew == 0) { break; } } } #endif } void Cycle_Prediction(long currentTime) { for(int h=0; h<beliefsSelectedCnt; h++) { Event *e = &selectedBeliefs[h]; double parentpriority = selectedBeliefsPriority[h]; #pragma omp parallel for for(int j=0; j<concepts.itemsAmount; j++) { Concept *c = concepts.items[j].address; if(c->priority < conceptPriorityThreshold) { continue; } for(int k=0; k<c->precondition_beliefs[0].itemsAmount; k++) { if(!Memory_ImplicationValid(&c->precondition_beliefs[0].array[k])) { Table_Remove(&c->precondition_beliefs[0], k--); continue; } Implication *imp = &c->precondition_beliefs[0].array[k]; Term precondition = Term_ExtractSubterm(&imp->term, 1); Substitution subs = Variable_Unify(&precondition, &e->term); if(subs.success) { assert(Narsese_copulaEquals(imp->term.atoms[0],'$'), "Not a valid implication term!"); Concept *c_pre = Memory_FindConceptByTerm(&precondition); if(c_pre != NULL) { Substitution subs = 
Variable_Unify(&precondition, &e->term); Implication updated_imp = *imp; bool success; updated_imp.term = Variable_ApplySubstitute(updated_imp.term, subs, &success); if(success) { Event predicted = Inference_BeliefDeduction(e, &updated_imp); #pragma omp critical(Memory) { Memory_AddEvent(&predicted, currentTime, parentpriority*Truth_Expectation(predicted.truth), 0, false, true, false, false, true); } } } } } } } } void Cycle_RelativeForgetting(long currentTime) { //Apply event forgetting: for(int i=0; i<cycling_belief_events.itemsAmount; i++) { cycling_belief_events.items[i].priority *= EVENT_DURABILITY; } for(int i=0; i<cycling_goal_events.itemsAmount; i++) { cycling_goal_events.items[i].priority *= EVENT_DURABILITY; } //Apply concept forgetting: for(int i=0; i<concepts.itemsAmount; i++) { Concept *c = concepts.items[i].address; c->priority *= CONCEPT_DURABILITY; concepts.items[i].priority = Usage_usefulness(c->usage, currentTime); //how concept memory is sorted by, by concept usefulness } //BEGIN SPECIAL HANDLING FOR USER KNOWLEDGE if(ontology_handling) { //BEGIN SPECIAL HANDLING FOR USER KNOWLEDGE for(int i=0; i<concepts.itemsAmount; i++) { Concept *c = concepts.items[i].address; if(c->hasUserKnowledge) { c->usage = Usage_use(c->usage, currentTime, false); //user implication won't be forgotten } } } //END SPECIAL HANDLING FOR USER KNOWLEDGE //Re-sort queues PriorityQueue_Rebuild(&concepts); PriorityQueue_Rebuild(&cycling_belief_events); PriorityQueue_Rebuild(&cycling_goal_events); } void Cycle_Perform(long currentTime) { Metric_send("NARNode.Cycle", 1); //1. Retrieve BELIEF/GOAL_EVENT_SELECTIONS events from cyclings events priority queue (which includes both input and derivations) Cycle_PopEvents(selectedGoals, selectedGoalsPriority, &goalsSelectedCnt, &cycling_goal_events, GOAL_EVENT_SELECTIONS); Cycle_PopEvents(selectedBeliefs, selectedBeliefsPriority, &beliefsSelectedCnt, &cycling_belief_events, BELIEF_EVENT_SELECTIONS); //2. 
Process incoming belief events from FIFO, building implications utilizing input sequences Cycle_ProcessInputBeliefEvents(currentTime); //3. Process incoming goal events, propagating subgoals according to implications, triggering decisions when above decision threshold Cycle_ProcessInputGoalEvents(currentTime); //4. Perform inference between in 1. retrieved events and semantically/temporally related, high-priority concepts to derive and process new events Cycle_Inference(currentTime); Cycle_Prediction(currentTime); //5. Apply relative forgetting for concepts according to CONCEPT_DURABILITY and events according to BELIEF_EVENT_DURABILITY Cycle_RelativeForgetting(currentTime); //6. Push in 1. selected events back to the queue as well, applying relative forgetting based on BELIEF_EVENT_DURABILITY_ON_USAGE Cycle_PushEvents(currentTime); }
softmax_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: haitao@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "softmax_param.h"
#include <math.h>
#include <stdlib.h> /* malloc/free — may also be pulled in via sys_port.h, included explicitly for safety */
#include <string.h> /* memcpy/memset */

/**
 * @brief Column-wise maximum over the softmax axis.
 *
 * Treats `input` as an (on_size x in_size) row-major matrix and writes the
 * per-column maximum into `array` (length in_size). Subtracting this maximum
 * before exponentiation keeps exp() numerically stable.
 *
 * @param[in]  input      pointer to input data (float)
 * @param[out] array      pointer to per-column maxima (float, length in_size)
 * @param[in]  in_size    number of columns (product of dims after the axis)
 * @param[in]  on_size    number of rows (the softmax axis dimension)
 * @param[in]  num_thread thread count hint (parallel loop currently disabled)
 * @return none.
 */
static void GetMaxArray(void* input, void* array, int in_size, int on_size, int num_thread)
{
    float* input_ptr = ( float* )input;
    float* array_ptr = ( float* )array;

    /* seed with the first row, then fold in the remaining rows */
    memcpy(array_ptr, input_ptr, in_size * sizeof(float));

    // #pragma omp parallel for num_threads(num_thread)
    for (int j = 0; j < on_size; j++)
    {
        for (int l = 0; l < in_size; l++)
        {
            if (array_ptr[l] < input_ptr[j * in_size + l])
                array_ptr[l] = input_ptr[j * in_size + l];
        }
    }
}

/**
 * @brief Compute softmax given precomputed per-column maxima.
 *
 * output[j][l] = exp(input[j][l] - max[l]) / sum_j exp(input[j][l] - max[l])
 *
 * @param[in]  input      pointer to input data (float)
 * @param[out] output     pointer to output data (float, same layout as input)
 * @param[in]  array      per-column maxima from GetMaxArray (length in_size)
 * @param[out] sum_array  scratch buffer for per-column sums (length in_size)
 * @param[in]  in_size    number of columns
 * @param[in]  on_size    number of rows (softmax axis dimension)
 * @param[in]  num_thread thread count hint (parallel loops currently disabled)
 * @return none.
 */
static void GetOutResult(void* input, void* output, void* array, void* sum_array, int in_size, int on_size,
                         int num_thread)
{
    float* input_ptr = ( float* )input;
    float* output_ptr = ( float* )output;
    float* array_ptr = ( float* )array;
    float* sum_array_ptr = ( float* )sum_array;

    memset(sum_array, 0x0, in_size * sizeof(float));

    /* get the exp and the summary */
    // #pragma omp parallel for num_threads(num_thread)
    for (int j = 0; j < on_size; j++)
    {
        for (int l = 0; l < in_size; l++)
        {
            int index = j * in_size + l;
            output_ptr[index] = exp(input_ptr[index] - array_ptr[l]);
            sum_array_ptr[l] += output_ptr[index];
        }
    }

    /* the final result */
    // #pragma omp parallel for num_threads(num_thread)
    for (int j = 0; j < on_size; j++)
    {
        for (int l = 0; l < in_size; l++)
        {
            int index = j * in_size + l;
            output_ptr[index] /= sum_array_ptr[l];
        }
    }
}

/* No per-node state to set up. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to tear down. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Nothing to pre-compute; buffers are allocated per run() call. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/**
 * @brief Execute softmax on the node's input tensor.
 *
 * The tensor is viewed as (out_size, on_size, in_size) around the softmax
 * axis; softmax is computed over on_size for each of the out_size slices.
 * Supports fp32 directly and uint8 via dequantize -> fp32 softmax -> requantize.
 *
 * @return 0 on success, -1 on allocation failure.
 */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct softmax_param* softmax_param = ( struct softmax_param* )ir_node->op.param_mem;

    int element_size = input_tensor->elem_size;
    int type = input_tensor->data_type;

    /* NOTE(review): assumes dim_num <= 4 — confirm against the IR's maximum shape rank */
    int dims[4];
    for (int i = 0; i < input_tensor->dim_num; i++)
    {
        dims[i] = input_tensor->dims[i];
    }

    int axis = softmax_param->axis;
    int out_size, in_size, on_size;

    /* out_size: product of dims before the axis; in_size: product after; on_size: the axis itself */
    out_size = 1;
    for (int i = 0; i < axis; i++)
    {
        out_size *= dims[i];
    }
    in_size = 1;
    for (size_t i = axis + 1; i < input_tensor->dim_num; i++)
    {
        in_size *= dims[i];
    }
    on_size = dims[axis];

    uint8_t* input = input_tensor->data;
    uint8_t* output = output_tensor->data;

    float* max_array = ( float* )malloc(in_size * sizeof(float));
    float* sum_array = ( float* )malloc(in_size * sizeof(float));
    if (max_array == NULL || sum_array == NULL)
    {
        free(max_array);
        free(sum_array);
        return -1;
    }

    int on_in_size = on_size * in_size;

    if (type == TENGINE_DT_UINT8)
    {
        int total_size = on_in_size * out_size;

        /* temporary fp32 staging buffers for dequantized data */
        float* input_f = ( float* )malloc(total_size * sizeof(float));
        float* output_f = ( float* )malloc(total_size * sizeof(float));
        if (input_f == NULL || output_f == NULL)
        {
            free(input_f);
            free(output_f);
            free(max_array);
            free(sum_array);
            return -1;
        }

        float input_scale = input_tensor->scale;
        float output_scale = output_tensor->scale;
        uint8_t input_zero = input_tensor->zero_point;
        uint8_t output_zero = output_tensor->zero_point;

        /* dequant to fp32 */
        for (int i = 0; i < out_size; i++)
            for (int j = 0; j < on_in_size; j++)
                input_f[i * on_in_size + j] = (input[i * on_in_size + j] - input_zero) * input_scale;

        /* fp32 softmax */
        for (int i = 0; i < out_size; i++)
        {
            /* get max */
            int img_base = i * in_size * on_size;
            GetMaxArray(input_f + img_base, max_array, in_size, on_size, exec_graph->num_thread);
            GetOutResult(input_f + img_base, output_f + img_base, max_array, sum_array, in_size, on_size,
                         exec_graph->num_thread);
        }

        /* quant to uint8, clamped to [0, 255] */
        for (int i = 0; i < out_size; i++)
        {
            for (int j = 0; j < on_in_size; j++)
            {
                int udata = (int)(round(output_f[i * on_in_size + j] / output_scale) + output_zero);
                if (udata > 255)
                    udata = 255;
                else if (udata < 0)
                    udata = 0;
                output[i * on_in_size + j] = udata;
            }
        }

        free(input_f);
        free(output_f);
    }
    else
    {
        for (int i = 0; i < out_size; i++)
        {
            /* get max — img_base is a byte offset (input/output are uint8_t*),
               which matches float indexing since element_size == sizeof(float) for fp32 */
            int img_base = i * on_in_size * element_size;
            GetMaxArray(input + img_base, max_array, in_size, on_size, exec_graph->num_thread);
            GetOutResult(input + img_base, output + img_base, max_array, sum_array, in_size, on_size,
                         exec_graph->num_thread);
        }
    }

    free(max_array);
    free(sum_array);

    return 0;
}

/**
 * @brief Propagate the input shape to the output tensor if it changed.
 *
 * NOTE(review): compares dims[1..3] without checking dim_num — assumes 4-D
 * tensors here; confirm against the shapes this op can receive.
 */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;
    int ret = 0;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    if (input_tensor->dims[1] != output_tensor->dims[1] || input_tensor->dims[2] != output_tensor->dims[2] ||
        input_tensor->dims[3] != output_tensor->dims[3])
        ret = set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num);

    return ret;
}

/* Reference implementation: always available, lowest-priority candidate. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_softmax_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}

static int unreg_softmax_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_softmax_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_softmax_hcl_ops);
tree-pretty-print.c
/* Pretty formatting of GENERIC trees in C syntax.
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "diagnostic.h"
#include "real.h"
#include "hashtab.h"
#include "tree-flow.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "tree-chrec.h"
#include "tree-pass.h"

/* Local functions, macros and variables.  */
static int op_prio (tree);
static const char *op_symbol_1 (enum tree_code);
static const char *op_symbol (tree);
static void pretty_print_string (pretty_printer *, const char*);
static void print_call_name (pretty_printer *, tree);
static void newline_and_indent (pretty_printer *, int);
static void maybe_init_pretty_print (FILE *);
static void print_declaration (pretty_printer *, tree, int, int);
static void print_struct_decl (pretty_printer *, tree, int, int);
static void do_niy (pretty_printer *, tree);
static void dump_vops (pretty_printer *, tree, int, int);
static void dump_generic_bb_buff (pretty_printer *, basic_block, int, int);

/* Emit SPACE spaces into the implicitly-named `buffer'.  */
#define INDENT(SPACE) do { \
  int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0)

/* "Not implemented yet" marker for unknown tree codes.  */
#define NIY do_niy(buffer,node)

/* Print the printable name of a function decl; a NOP_EXPR wrapper is
   looked through to the wrapped declaration.  */
#define PRINT_FUNCTION_NAME(NODE) pp_printf             \
  (buffer, "%s", TREE_CODE (NODE) == NOP_EXPR ?         \
   lang_hooks.decl_printable_name (TREE_OPERAND (NODE, 0), 1) : \
   lang_hooks.decl_printable_name (NODE, 1))

/* Shared pretty-printer state, lazily set up by maybe_init_pretty_print.  */
static pretty_printer buffer;
static int initialized = 0;

/* Try to print something for an unknown tree code.  */

static void
do_niy (pretty_printer *buffer, tree node)
{
  int i, len;

  pp_string (buffer, "<<< Unknown tree: ");
  pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]);

  if (EXPR_P (node))
    {
      len = TREE_CODE_LENGTH (TREE_CODE (node));
      for (i = 0; i < len; ++i)
	{
	  newline_and_indent (buffer, 2);
	  dump_generic_node (buffer, TREE_OPERAND (node, i), 2, 0, false);
	}
    }

  pp_string (buffer, " >>>\n");
}

/* Debugger helper: print expression T with virtual operands and UIDs to
   stderr.  */

void
debug_generic_expr (tree t)
{
  print_generic_expr (stderr, t, TDF_VOPS|TDF_UID);
  fprintf (stderr, "\n");
}

/* Debugger helper: print statement T with virtual operands and UIDs to
   stderr.  */

void
debug_generic_stmt (tree t)
{
  print_generic_stmt (stderr, t, TDF_VOPS|TDF_UID);
  fprintf (stderr, "\n");
}

/* Debugger helper: print T and every node reachable through TREE_CHAIN.  */

void
debug_tree_chain (tree t)
{
  while (t)
    {
      print_generic_expr (stderr, t, TDF_VOPS|TDF_UID);
      fprintf(stderr, " ");
      t = TREE_CHAIN (t);
    }
  fprintf (stderr, "\n");
}

/* Prints declaration DECL to the FILE with details specified by FLAGS.  */

void
print_generic_decl (FILE *file, tree decl, int flags)
{
  maybe_init_pretty_print (file);
  print_declaration (&buffer, decl, 2, flags);
  pp_write_text_to_stream (&buffer);
}

/* Print tree T, and its successors, on file FILE.  FLAGS specifies details
   to show in the dump.  See TDF_* in tree.h.  */

void
print_generic_stmt (FILE *file, tree t, int flags)
{
  maybe_init_pretty_print (file);
  dump_generic_node (&buffer, t, 0, flags, true);
  pp_flush (&buffer);
}

/* Print tree T, and its successors, on file FILE.  FLAGS specifies details
   to show in the dump.  See TDF_* in tree.h.  The output is indented by
   INDENT spaces.  */

void
print_generic_stmt_indented (FILE *file, tree t, int flags, int indent)
{
  int i;

  maybe_init_pretty_print (file);

  for (i = 0; i < indent; i++)
    pp_space (&buffer);
  dump_generic_node (&buffer, t, indent, flags, true);
  pp_flush (&buffer);
}

/* Print a single expression T on file FILE.  FLAGS specifies details to show
   in the dump.  See TDF_* in tree.h.  */

void
print_generic_expr (FILE *file, tree t, int flags)
{
  maybe_init_pretty_print (file);
  dump_generic_node (&buffer, t, 0, flags, false);
}

/* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set in
   FLAGS.  Anonymous decls are printed as L.<uid>, C.<uid> or D.<uid>
   depending on their tree code.  */

static void
dump_decl_name (pretty_printer *buffer, tree node, int flags)
{
  tree t = node;

  if (DECL_NAME (t))
    pp_tree_identifier (buffer, DECL_NAME (t));
  if ((flags & TDF_UID)
      || DECL_NAME (t) == NULL_TREE)
    {
      if (TREE_CODE (t) == LABEL_DECL
          && LABEL_DECL_UID (t) != -1)
        pp_printf (buffer, "L." HOST_WIDE_INT_PRINT_DEC, LABEL_DECL_UID (t));
      else
	{
	  char c = TREE_CODE (t) == CONST_DECL ? 'C' : 'D';
	  pp_printf (buffer, "%c.%u", c, DECL_UID (t));
	}
    }
}

/* Like the above, but used for pretty printing function calls.  */

static void
dump_function_name (pretty_printer *buffer, tree node)
{
  if (DECL_NAME (node))
    PRINT_FUNCTION_NAME (node);
  else
    dump_decl_name (buffer, node, 0);
}

/* Dump a function declaration.  NODE is the FUNCTION_TYPE.
   BUFFER, SPC and FLAGS are as in dump_generic_node.  */

static void
dump_function_declaration (pretty_printer *buffer, tree node,
			   int spc, int flags)
{
  bool wrote_arg = false;
  tree arg;

  pp_space (buffer);
  pp_character (buffer, '(');

  /* Print the argument types.  The last element in the list is a VOID_TYPE.
     The following avoids printing the last element.  */
  arg = TYPE_ARG_TYPES (node);
  while (arg && TREE_CHAIN (arg) && arg != error_mark_node)
    {
      wrote_arg = true;
      dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false);
      arg = TREE_CHAIN (arg);
      if (TREE_CHAIN (arg) && TREE_CODE (TREE_CHAIN (arg)) == TREE_LIST)
	{
	  pp_character (buffer, ',');
	  pp_space (buffer);
	}
    }

  if (!wrote_arg)
    pp_string (buffer, "void");

  pp_character (buffer, ')');
}

/* Dump the domain associated with an array.  Prints either the element
   count for a zero-based constant-size domain, or min:max otherwise.  */

static void
dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags)
{
  pp_character (buffer, '[');
  if (domain)
    {
      tree min = TYPE_MIN_VALUE (domain);
      tree max = TYPE_MAX_VALUE (domain);

      if (min && max
	  && integer_zerop (min)
	  && host_integerp (max, 0))
	pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1);
      else
	{
	  if (min)
	    dump_generic_node (buffer, min, spc, flags, false);
	  pp_character (buffer, ':');
	  if (max)
	    dump_generic_node (buffer, max, spc, flags, false);
	}
    }
  else
    pp_string (buffer, "<unknown>");
  pp_character (buffer, ']');
}


/* Dump OpenMP clause CLAUSE.  BUFFER, CLAUSE, SPC and FLAGS are as in
   dump_generic_node.  */

static void
dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags)
{
  const char *name;

  switch (OMP_CLAUSE_CODE (clause))
    {
    /* Data-sharing clauses all print as name(decl) via print_remap.  */
    case OMP_CLAUSE_PRIVATE:
      name = "private";
      goto print_remap;
    case OMP_CLAUSE_SHARED:
      name = "shared";
      goto print_remap;
    case OMP_CLAUSE_FIRSTPRIVATE:
      name = "firstprivate";
      goto print_remap;
    case OMP_CLAUSE_LASTPRIVATE:
      name = "lastprivate";
      goto print_remap;
    case OMP_CLAUSE_COPYIN:
      name = "copyin";
      goto print_remap;
    case OMP_CLAUSE_COPYPRIVATE:
      name = "copyprivate";
      goto print_remap;
  print_remap:
      pp_string (buffer, name);
      pp_character (buffer, '(');
      dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
	  spc, flags, false);
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_REDUCTION:
      pp_string (buffer, "reduction(");
      pp_string (buffer, op_symbol_1 (OMP_CLAUSE_REDUCTION_CODE (clause)));
      pp_character (buffer, ':');
      dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
	  spc, flags, false);
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_IF:
      pp_string (buffer, "if(");
      dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause),
	  spc, flags, false);
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_NUM_THREADS:
      pp_string (buffer, "num_threads(");
      dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause),
	  spc, flags, false);
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_NOWAIT:
      pp_string (buffer, "nowait");
      break;

    case OMP_CLAUSE_ORDERED:
      pp_string (buffer, "ordered");
      break;

    case OMP_CLAUSE_DEFAULT:
      pp_string (buffer, "default(");
      switch (OMP_CLAUSE_DEFAULT_KIND (clause))
	{
	case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
	  break;
	case OMP_CLAUSE_DEFAULT_SHARED:
	  pp_string (buffer, "shared");
	  break;
	case OMP_CLAUSE_DEFAULT_NONE:
	  pp_string (buffer, "none");
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  pp_string (buffer, "private");
	  break;
	default:
	  gcc_unreachable ();
	}
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_SCHEDULE:
      pp_string (buffer, "schedule(");
      switch (OMP_CLAUSE_SCHEDULE_KIND (clause))
	{
      case OMP_CLAUSE_SCHEDULE_STATIC:
	pp_string (buffer, "static");
	break;
      case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	pp_string (buffer, "dynamic");
	break;
      case OMP_CLAUSE_SCHEDULE_GUIDED:
	pp_string (buffer, "guided");
	break;
      case OMP_CLAUSE_SCHEDULE_RUNTIME:
	pp_string (buffer, "runtime");
	break;
      default:
	gcc_unreachable ();
	}
      if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause))
	{
	  pp_character (buffer, ',');
	  dump_generic_node (buffer,
	      OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause),
	      spc, flags, false);
	}
      pp_character (buffer, ')');
      break;

    default:
      /* Should never happen.  */
      dump_generic_node (buffer, clause, spc, flags, false);
      break;
    }
}


/* Dump the list of OpenMP clauses.  BUFFER, SPC and FLAGS are as in
   dump_generic_node.  */

static void
dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags)
{
  if (clause == NULL)
    return;

  pp_space (buffer);
  while (1)
    {
      dump_omp_clause (buffer, clause, spc, flags);
      clause = OMP_CLAUSE_CHAIN (clause);
      if (clause == NULL)
	return;
      pp_space (buffer);
    }
}


/* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of indent.
   FLAGS specifies details to show in the dump (see TDF_* in tree.h).  If
   IS_STMT is true, the object printed is considered to be a statement
   and it is terminated by ';' if appropriate.
*/ int dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, bool is_stmt) { tree type; tree op0, op1; const char *str; bool is_expr; if (node == NULL_TREE) return spc; is_expr = EXPR_P (node); if (TREE_CODE (node) != ERROR_MARK && is_gimple_stmt (node) && (flags & TDF_VOPS) && stmt_ann (node) && TREE_CODE (node) != PHI_NODE) dump_vops (buffer, node, spc, flags); if (is_stmt && (flags & TDF_STMTADDR)) pp_printf (buffer, "<&%p> ", (void *)node); if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node)) { expanded_location xloc = expand_location (EXPR_LOCATION (node)); pp_character (buffer, '['); if (xloc.file) { pp_string (buffer, xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, xloc.line); pp_string (buffer, "] "); } switch (TREE_CODE (node)) { case ERROR_MARK: pp_string (buffer, "<<< error >>>"); break; case IDENTIFIER_NODE: pp_tree_identifier (buffer, node); break; case TREE_LIST: while (node && node != error_mark_node) { if (TREE_PURPOSE (node)) { dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false); pp_space (buffer); } dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false); node = TREE_CHAIN (node); if (node && TREE_CODE (node) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } break; case TREE_BINFO: dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false); case TREE_VEC: { size_t i; if (TREE_VEC_LENGTH (node) > 0) { size_t len = TREE_VEC_LENGTH (node); for (i = 0; i < len - 1; i++) { dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); } dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc, flags, false); } } break; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: { unsigned int quals = TYPE_QUALS (node); enum tree_code_class class; if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); else if (quals & TYPE_QUAL_VOLATILE) 
pp_string (buffer, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, "restrict "); class = TREE_CODE_CLASS (TREE_CODE (node)); if (class == tcc_declaration) { if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else pp_string (buffer, "<unnamed type decl>"); } else if (class == tcc_type) { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) pp_tree_identifier (buffer, TYPE_NAME (node)); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_string (buffer, "<unnamed type>"); } else if (TREE_CODE (node) == VECTOR_TYPE) { pp_string (buffer, "vector "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else pp_string (buffer, "<unnamed type>"); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&"); if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE (node); dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false); pp_space (buffer); pp_character (buffer, '('); pp_string (buffer, str); if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); pp_character (buffer, ')'); dump_function_declaration (buffer, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS (node); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); pp_string (buffer, str); if (quals & TYPE_QUAL_CONST) pp_string (buffer, " const"); else if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile"); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, " restrict"); if (TYPE_REF_CAN_ALIAS_ALL (node)) pp_string (buffer, " {ref-all}"); } break; case OFFSET_TYPE: NIY; break; case METHOD_TYPE: dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), flags); pp_string (buffer, "::"); break; case TARGET_MEM_REF: { const char *sep 
= ""; tree tmp; pp_string (buffer, "MEM["); tmp = TMR_SYMBOL (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "symbol: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_BASE (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "base: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_INDEX (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "index: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_STEP (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "step: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_OFFSET (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "offset: "); dump_generic_node (buffer, tmp, spc, flags, false); } pp_string (buffer, "]"); if (flags & TDF_DETAILS) { pp_string (buffer, "{"); dump_generic_node (buffer, TMR_ORIGINAL (node), spc, flags, false); pp_string (buffer, "}"); } } break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. */ for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) ; dump_generic_node (buffer, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: /* Print the name of the structure. */ if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if (TREE_CODE (node) == UNION_TYPE) pp_string (buffer, "union "); if (TYPE_NAME (node)) dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false); else print_struct_decl (buffer, node, spc, flags); break; case LANG_TYPE: NIY; break; case INTEGER_CST: if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE) { /* In the case of a pointer, one may want to divide by the size of the pointed-to type. Unfortunately, this not straightforward. 
The C front-end maps expressions (int *) 5 int *p; (p + 5) in such a way that the two INTEGER_CST nodes for "5" have different values but identical types. In the latter case, the 5 is multiplied by sizeof (int) in c-common.c (pointer_int_sum) to convert it to a byte address, and yet the type of the node is left unchanged. Argh. What is consistent though is that the number value corresponds to bytes (UNITS) offset. NB: Neither of the following divisors can be trivially used to recover the original literal: TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); pp_string (buffer, "B"); /* pseudo-unit */ } else if (! host_integerp (node, 0)) { tree val = node; if (tree_int_cst_sgn (val) < 0) { pp_character (buffer, '-'); val = build_int_cst_wide (NULL_TREE, -TREE_INT_CST_LOW (val), ~TREE_INT_CST_HIGH (val) + !TREE_INT_CST_LOW (val)); } /* Would "%x%0*x" or "%x%*0x" get zero-padding on all systems? */ { static char format[10]; /* "%x%09999x\0" */ if (!format[0]) sprintf (format, "%%x%%0%dx", HOST_BITS_PER_INT / 4); sprintf (pp_buffer (buffer)->digit_buffer, format, TREE_INT_CST_HIGH (val), TREE_INT_CST_LOW (val)); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } } else pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); break; case REAL_CST: /* Code copied from print_node. 
*/ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) pp_string (buffer, " overflow"); #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC) d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) pp_string (buffer, " Inf"); else if (REAL_VALUE_ISNAN (d)) pp_string (buffer, " Nan"); else { char string[100]; real_to_decimal (string, &d, sizeof (string), 0, 1); pp_string (buffer, string); } #else { HOST_WIDE_INT i; unsigned char *p = (unsigned char *) &TREE_REAL_CST (node); pp_string (buffer, "0x"); for (i = 0; i < sizeof TREE_REAL_CST (node); i++) output_formatted_integer (buffer, "%02x", *p++); } #endif break; } case COMPLEX_CST: pp_string (buffer, "__complex__ ("); dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false); pp_string (buffer, ")"); break; case STRING_CST: pp_string (buffer, "\""); pretty_print_string (buffer, TREE_STRING_POINTER (node)); pp_string (buffer, "\""); break; case VECTOR_CST: { tree elt; pp_string (buffer, "{ "); for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt)) { dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false); if (TREE_CHAIN (elt)) pp_string (buffer, ", "); } pp_string (buffer, " }"); } break; case FUNCTION_TYPE: break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name (buffer, node, flags); break; case LABEL_DECL: if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else if (LABEL_DECL_UID (node) != -1) pp_printf (buffer, "<L" HOST_WIDE_INT_PRINT_DEC ">", LABEL_DECL_UID (node)); else pp_printf (buffer, "<D%u>", DECL_UID (node)); break; case TYPE_DECL: if (DECL_IS_BUILTIN (node)) { /* Don't print the declaration of built-in types. 
*/ break; } if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else { if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) && TYPE_METHODS (TREE_TYPE (node))) { /* The type is a c++ class: all structures have at least 4 methods. */ pp_string (buffer, "class "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else { pp_string (buffer, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE ? "union" : "struct ")); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } } break; case SYMBOL_MEMORY_TAG: case NAME_MEMORY_TAG: case STRUCT_FIELD_TAG: case VAR_DECL: case PARM_DECL: case FIELD_DECL: case NAMESPACE_DECL: dump_decl_name (buffer, node, flags); break; case RESULT_DECL: pp_string (buffer, "<retval>"); break; case COMPONENT_REF: op0 = TREE_OPERAND (node, 0); str = "."; if (TREE_CODE (op0) == INDIRECT_REF) { op0 = TREE_OPERAND (op0, 0); str = "->"; } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_string (buffer, str); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (op0) != VALUE_HANDLE) { op0 = component_ref_field_offset (node); if (op0 && TREE_CODE (op0) != INTEGER_CST) { pp_string (buffer, "{off: "); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, '}'); } } break; case BIT_FIELD_REF: pp_string (buffer, "BIT_FIELD_REF <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND (node, 0); if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if 
(op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_character (buffer, '['); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (node) == ARRAY_RANGE_REF) pp_string (buffer, " ..."); pp_character (buffer, ']'); op0 = array_ref_low_bound (node); op1 = array_ref_element_size (node); if (!integer_zerop (op0) || (TYPE_SIZE_UNIT (TREE_TYPE (node)) && !operand_equal_p (op1, TYPE_SIZE_UNIT (TREE_TYPE (node)), 0))) { pp_string (buffer, "{lb: "); dump_generic_node (buffer, op0, spc, flags, false); pp_string (buffer, " sz: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, '}'); } break; case CONSTRUCTOR: { unsigned HOST_WIDE_INT ix; tree field, val; bool is_struct_init = FALSE; pp_character (buffer, '{'); if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) is_struct_init = TRUE; FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) { if (field && is_struct_init) { pp_character (buffer, '.'); dump_generic_node (buffer, field, spc, flags, false); pp_string (buffer, "="); } if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); if (val && TREE_CODE (val) == FUNCTION_DECL) dump_decl_name (buffer, val, flags); else dump_generic_node (buffer, val, spc, flags, false); if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1) { pp_character (buffer, ','); pp_space (buffer); } } pp_character (buffer, '}'); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string (buffer, "<COMPOUND_EXPR>"); break; } dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } for (tp = &TREE_OPERAND (node, 1); TREE_CODE (*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND (*tp, 1)) { dump_generic_node (buffer, TREE_OPERAND (*tp, 0), spc, flags, 
!(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } } dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM)); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if (flags & TDF_SLIM) { pp_string (buffer, "<STATEMENT_LIST>"); break; } for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si)) { if (!first) newline_and_indent (buffer, spc); else first = false; dump_generic_node (buffer, tsi_stmt (si), spc, flags, true); } } break; case MODIFY_EXPR: case INIT_EXPR: dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string (buffer, "TARGET_EXPR <"); dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false); pp_character (buffer, '>'); break; case DECL_EXPR: print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node) { pp_string (buffer, "if ("); dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false); pp_character (buffer, ')'); /* The lowered cond_exprs should always be printed in full. */ if (COND_EXPR_THEN (node) && (IS_EMPTY_STMT (COND_EXPR_THEN (node)) || TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR) && COND_EXPR_ELSE (node) && (IS_EMPTY_STMT (COND_EXPR_ELSE (node)) || TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR)) { pp_space (buffer); dump_generic_node (buffer, COND_EXPR_THEN (node), 0, flags, true); pp_string (buffer, " else "); dump_generic_node (buffer, COND_EXPR_ELSE (node), 0, flags, true); } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. 
*/ if (COND_EXPR_THEN (node)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } /* Output COND_EXPR_ELSE. */ if (COND_EXPR_ELSE (node)) { newline_and_indent (buffer, spc); pp_string (buffer, "else"); newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } } is_expr = false; } else { dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '?'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_space (buffer); pp_character (buffer, ':'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); } break; case BIND_EXPR: pp_character (buffer, '{'); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS (node)) { pp_newline (buffer); for (op0 = BIND_EXPR_VARS (node); op0; op0 = TREE_CHAIN (op0)) { print_declaration (buffer, op0, spc+2, flags); pp_newline (buffer); } } newline_and_indent (buffer, spc+2); dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true); newline_and_indent (buffer, spc); pp_character (buffer, '}'); } is_expr = false; break; case CALL_EXPR: print_call_name (buffer, node); /* Print parameters. 
*/ pp_space (buffer); pp_character (buffer, '('); op1 = TREE_OPERAND (node, 1); if (op1) dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); op1 = TREE_OPERAND (node, 2); if (op1) { pp_string (buffer, " [static-chain: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ']'); } if (CALL_EXPR_RETURN_SLOT_OPT (node)) pp_string (buffer, " [return slot optimization]"); if (CALL_EXPR_TAILCALL (node)) pp_string (buffer, " [tail call]"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string (buffer, "<<cleanup_point "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">>"); break; case PLACEHOLDER_EXPR: pp_string (buffer, "<PLACEHOLDER_EXPR "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_character (buffer, '>'); break; /* Binary arithmetic and logic expressions. */ case WIDEN_SUM_EXPR: case WIDEN_MULT_EXPR: case MULT_EXPR: case PLUS_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol (node); op0 = TREE_OPERAND (node, 0); op1 = TREE_OPERAND (node, 1); /* When the operands are expressions with less priority, keep semantics of the tree representation. 
*/ if (op_prio (op0) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op0, spc, flags, false); pp_space (buffer); pp_string (buffer, op); pp_space (buffer); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op1) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. */ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: if (TREE_CODE (node) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST || TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function pointers. 
*/ else pp_string (buffer, op_symbol (node)); if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); if (TREE_CODE (node) == MISALIGNED_INDIRECT_REF) { pp_string (buffer, "{misalignment: "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '}'); } break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, op_symbol (node)); break; case MIN_EXPR: pp_string (buffer, "MIN_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case MAX_EXPR: pp_string (buffer, "MAX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case ABS_EXPR: pp_string (buffer, "ABS_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case RANGE_EXPR: NIY; break; case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: case FLOAT_EXPR: case CONVERT_EXPR: case NOP_EXPR: type = TREE_TYPE (node); op0 = TREE_OPERAND (node, 0); if (type != TREE_TYPE (op0)) { pp_character (buffer, '('); dump_generic_node (buffer, type, spc, flags, false); pp_string (buffer, ") "); } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if 
(op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); break; case VIEW_CONVERT_EXPR: pp_string (buffer, "VIEW_CONVERT_EXPR<"); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_string (buffer, ">("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case NON_LVALUE_EXPR: pp_string (buffer, "NON_LVALUE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case SAVE_EXPR: pp_string (buffer, "SAVE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case COMPLEX_EXPR: pp_string (buffer, "COMPLEX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case CONJ_EXPR: pp_string (buffer, "CONJ_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case REALPART_EXPR: pp_string (buffer, "REALPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case IMAGPART_EXPR: pp_string (buffer, "IMAGPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case VA_ARG_EXPR: pp_string (buffer, "VA_ARG_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: pp_string (buffer, "try"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); newline_and_indent (buffer, spc); pp_string (buffer, (TREE_CODE (node) == TRY_CATCH_EXPR) ? 
"catch" : "finally"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CATCH_EXPR: pp_string (buffer, "catch ("); dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false); pp_string (buffer, ")"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case EH_FILTER_EXPR: pp_string (buffer, "<<<eh_filter ("); dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false); pp_string (buffer, ")>>>"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case LABEL_EXPR: op0 = TREE_OPERAND (node, 0); /* If this is for break or continue, don't bother printing it. 
*/ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) break; } dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ':'); if (DECL_NONLOCAL (op0)) pp_string (buffer, " [non-local]"); break; case EXC_PTR_EXPR: pp_string (buffer, "<<<exception object>>>"); break; case FILTER_EXPR: pp_string (buffer, "<<<filter object>>>"); break; case LOOP_EXPR: pp_string (buffer, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case RETURN_EXPR: pp_string (buffer, "return"); op0 = TREE_OPERAND (node, 0); if (op0) { pp_space (buffer); if (TREE_CODE (op0) == MODIFY_EXPR) dump_generic_node (buffer, TREE_OPERAND (op0, 1), spc, flags, false); else dump_generic_node (buffer, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string (buffer, "if ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ") break"); break; case SWITCH_EXPR: pp_string (buffer, "switch ("); dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false); pp_character (buffer, ')'); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); if (SWITCH_BODY (node)) { newline_and_indent (buffer, spc+4); dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags, true); } else { tree vec = SWITCH_LABELS (node); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); newline_and_indent (buffer, spc+4); if (elt) { dump_generic_node (buffer, elt, spc+4, flags, false); pp_string (buffer, " goto "); dump_generic_node (buffer, CASE_LABEL (elt), spc+4, flags, true); pp_semicolon (buffer); } else pp_string (buffer, "case ???: goto ???;"); } } 
newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION (node); if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (buffer, name); break; } } pp_string (buffer, "goto "); dump_generic_node (buffer, op0, spc, flags, false); break; case RESX_EXPR: pp_string (buffer, "resx "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); break; case ASM_EXPR: pp_string (buffer, "__asm__"); if (ASM_VOLATILE_P (node)) pp_string (buffer, " __volatile__"); pp_character (buffer, '('); dump_generic_node (buffer, ASM_STRING (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false); if (ASM_CLOBBERS (node)) { pp_character (buffer, ':'); dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false); } pp_string (buffer, ")"); break; case CASE_LABEL_EXPR: if (CASE_LOW (node) && CASE_HIGH (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); pp_string (buffer, " ... 
"); dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false); } else if (CASE_LOW (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); } else pp_string (buffer, "default "); pp_character (buffer, ':'); break; case OBJ_TYPE_REF: pp_string (buffer, "OBJ_TYPE_REF("); dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false); pp_character (buffer, ';'); dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false); pp_character (buffer, '-'); pp_character (buffer, '>'); dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false); pp_character (buffer, ')'); break; case PHI_NODE: { int i; dump_generic_node (buffer, PHI_RESULT (node), spc, flags, false); pp_string (buffer, " = PHI <"); for (i = 0; i < PHI_NUM_ARGS (node); i++) { dump_generic_node (buffer, PHI_ARG_DEF (node, i), spc, flags, false); pp_string (buffer, "("); pp_decimal_int (buffer, PHI_ARG_EDGE (node, i)->src->index); pp_string (buffer, ")"); if (i < PHI_NUM_ARGS (node) - 1) pp_string (buffer, ", "); } pp_string (buffer, ">;"); } break; case SSA_NAME: dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false); pp_string (buffer, "_"); pp_decimal_int (buffer, SSA_NAME_VERSION (node)); if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node)) pp_string (buffer, "(ab)"); break; case WITH_SIZE_EXPR: pp_string (buffer, "WITH_SIZE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case VALUE_HANDLE: pp_printf (buffer, "VH.%d", VALUE_HANDLE_ID (node)); break; case ASSERT_EXPR: pp_string (buffer, "ASSERT_EXPR <"); dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false); pp_string (buffer, ">"); break; case SCEV_KNOWN: pp_string (buffer, "scev_known"); break; 
case SCEV_NOT_KNOWN: pp_string (buffer, "scev_not_known"); break; case POLYNOMIAL_CHREC: pp_string (buffer, "{"); dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false); pp_string (buffer, ", +, "); dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false); pp_string (buffer, "}_"); dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false); is_stmt = false; break; case REALIGN_LOAD_EXPR: pp_string (buffer, "REALIGN_LOAD <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case VEC_COND_EXPR: pp_string (buffer, " VEC_COND_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case DOT_PROD_EXPR: pp_string (buffer, " DOT_PROD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case OMP_PARALLEL: pp_string (buffer, "#pragma omp parallel"); dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags); if (OMP_PARALLEL_FN (node)) { pp_string (buffer, " [child fn: "); dump_generic_node (buffer, OMP_PARALLEL_FN (node), spc, flags, false); pp_string (buffer, " ("); if (OMP_PARALLEL_DATA_ARG (node)) dump_generic_node (buffer, OMP_PARALLEL_DATA_ARG (node), spc, flags, false); else pp_string (buffer, "???"); pp_string (buffer, ")]"); } dump_omp_body: if (!(flags & TDF_SLIM) && OMP_BODY (node)) { 
newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } is_expr = false; break; case OMP_FOR: pp_string (buffer, "#pragma omp for"); dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags); if (!(flags & TDF_SLIM)) { if (OMP_FOR_PRE_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); spc += 4; newline_and_indent (buffer, spc); dump_generic_node (buffer, OMP_FOR_PRE_BODY (node), spc, flags, false); } newline_and_indent (buffer, spc); pp_string (buffer, "for ("); dump_generic_node (buffer, OMP_FOR_INIT (node), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, OMP_FOR_COND (node), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, OMP_FOR_INCR (node), spc, flags, false); pp_string (buffer, ")"); if (OMP_FOR_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } if (OMP_FOR_PRE_BODY (node)) { spc -= 4; newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } } is_expr = false; break; case OMP_SECTIONS: pp_string (buffer, "#pragma omp sections"); dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_SECTION: pp_string (buffer, "#pragma omp section"); goto dump_omp_body; case OMP_MASTER: pp_string (buffer, "#pragma omp master"); goto dump_omp_body; case OMP_ORDERED: pp_string (buffer, "#pragma omp ordered"); goto dump_omp_body; case OMP_CRITICAL: pp_string (buffer, "#pragma omp critical"); if (OMP_CRITICAL_NAME (node)) { pp_space (buffer); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc, flags, false); pp_character 
(buffer, ')'); } goto dump_omp_body; case OMP_ATOMIC: pp_string (buffer, "#pragma omp atomic"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_SINGLE: pp_string (buffer, "#pragma omp single"); dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_RETURN: pp_string (buffer, "OMP_RETURN"); if (OMP_RETURN_NOWAIT (node)) pp_string (buffer, " [nowait]"); is_expr = false; break; case OMP_CONTINUE: pp_string (buffer, "OMP_CONTINUE"); is_expr = false; break; case OMP_CLAUSE: dump_omp_clause (buffer, node, spc, flags); is_expr = false; break; case REDUC_MAX_EXPR: pp_string (buffer, " REDUC_MAX_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_MIN_EXPR: pp_string (buffer, " REDUC_MIN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_PLUS_EXPR: pp_string (buffer, " REDUC_PLUS_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case BLOCK: { tree t; pp_string (buffer, "BLOCK"); if (BLOCK_ABSTRACT (node)) pp_string (buffer, " [abstract]"); if (TREE_ASM_WRITTEN (node)) pp_string (buffer, " [written]"); newline_and_indent (buffer, spc + 2); if (BLOCK_SUPERCONTEXT (node)) { pp_string (buffer, "SUPERCONTEXT: "); if (TREE_CODE (BLOCK_SUPERCONTEXT (node)) == BLOCK) pp_printf (buffer, "BLOCK %p", (void *)BLOCK_SUPERCONTEXT (node)); else dump_generic_node (buffer, BLOCK_SUPERCONTEXT (node), 0, flags, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_SUBBLOCKS (node)) { pp_string (buffer, "SUBBLOCKS: "); for (t = BLOCK_SUBBLOCKS (node); t; t = BLOCK_CHAIN (t)) pp_printf (buffer, "%p ", (void *)t); 
newline_and_indent (buffer, spc + 2); } if (BLOCK_VARS (node)) { pp_string (buffer, "VARS: "); for (t = BLOCK_VARS (node); t; t = TREE_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_ABSTRACT_ORIGIN (node)) { pp_string (buffer, "ABSTRACT_ORIGIN: "); if (TREE_CODE (BLOCK_ABSTRACT_ORIGIN (node)) == BLOCK) pp_printf (buffer, "BLOCK %p", (void *)BLOCK_ABSTRACT_ORIGIN (node)); else dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (node), 0, flags, false); newline_and_indent (buffer, spc + 2); } } break; default: NIY; } if (is_stmt && is_expr) pp_semicolon (buffer); pp_write_text_to_stream (buffer); return spc; } /* Print the declaration of a variable. */ static void print_declaration (pretty_printer *buffer, tree t, int spc, int flags) { INDENT (spc); if (TREE_CODE (t) == TYPE_DECL) pp_string (buffer, "typedef "); if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t)) pp_string (buffer, "register "); if (TREE_PUBLIC (t) && DECL_EXTERNAL (t)) pp_string (buffer, "extern "); else if (TREE_STATIC (t)) pp_string (buffer, "static "); /* Print the type and name. */ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { tree tmp; /* Print array's type. */ tmp = TREE_TYPE (t); while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE) tmp = TREE_TYPE (tmp); dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); /* Print the dimensions. */ tmp = TREE_TYPE (t); while (TREE_CODE (tmp) == ARRAY_TYPE) { dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); tmp = TREE_TYPE (tmp); } } else if (TREE_CODE (t) == FUNCTION_DECL) { dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false); pp_space (buffer); dump_decl_name (buffer, t, flags); dump_function_declaration (buffer, TREE_TYPE (t), spc, flags); } else { /* Print type declaration. 
*/ dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); } if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) { pp_string (buffer, " __asm__ "); pp_character (buffer, '('); dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false); pp_character (buffer, ')'); } /* The initial value of a function serves to determine wether the function is declared or defined. So the following does not apply to function nodes. */ if (TREE_CODE (t) != FUNCTION_DECL) { /* Print the initial value. */ if (DECL_INITIAL (t)) { pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false); } } if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t)) { pp_string (buffer, " [value-expr: "); dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false); pp_character (buffer, ']'); } pp_character (buffer, ';'); } /* Prints a structure: name, fields, and methods. FIXME: Still incomplete. */ static void print_struct_decl (pretty_printer *buffer, tree node, int spc, int flags) { /* Print the name of the structure. */ if (TYPE_NAME (node)) { INDENT (spc); if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if ((TREE_CODE (node) == UNION_TYPE || TREE_CODE (node) == QUAL_UNION_TYPE)) pp_string (buffer, "union "); dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false); } /* Print the contents of the structure. */ pp_newline (buffer); INDENT (spc); pp_character (buffer, '{'); pp_newline (buffer); /* Print the fields of the structure. */ { tree tmp; tmp = TYPE_FIELDS (node); while (tmp) { /* Avoid to print recursively the structure. */ /* FIXME : Not implemented correctly..., what about the case when we have a cycle in the contain graph? ... Maybe this could be solved by looking at the scope in which the structure was declared. 
*/ if (TREE_TYPE (tmp) != node || (TREE_CODE (TREE_TYPE (tmp)) == POINTER_TYPE && TREE_TYPE (TREE_TYPE (tmp)) != node)) { print_declaration (buffer, tmp, spc+2, flags); pp_newline (buffer); } tmp = TREE_CHAIN (tmp); } } INDENT (spc); pp_character (buffer, '}'); } /* Return the priority of the operator OP. From lowest to highest precedence with either left-to-right (L-R) or right-to-left (R-L) associativity]: 1 [L-R] , 2 [R-L] = += -= *= /= %= &= ^= |= <<= >>= 3 [R-L] ?: 4 [L-R] || 5 [L-R] && 6 [L-R] | 7 [L-R] ^ 8 [L-R] & 9 [L-R] == != 10 [L-R] < <= > >= 11 [L-R] << >> 12 [L-R] + - 13 [L-R] * / % 14 [R-L] ! ~ ++ -- + - * & (type) sizeof 15 [L-R] fn() [] -> . unary +, - and * have higher precedence than the corresponding binary operators. */ static int op_prio (tree op) { if (op == NULL) return 9999; switch (TREE_CODE (op)) { case TREE_LIST: case COMPOUND_EXPR: case BIND_EXPR: return 1; case MODIFY_EXPR: case INIT_EXPR: return 2; case COND_EXPR: return 3; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return 4; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return 5; case BIT_IOR_EXPR: return 6; case BIT_XOR_EXPR: case TRUTH_XOR_EXPR: return 7; case BIT_AND_EXPR: return 8; case EQ_EXPR: case NE_EXPR: return 9; case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: return 10; case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: return 11; case WIDEN_SUM_EXPR: case PLUS_EXPR: case MINUS_EXPR: return 12; case WIDEN_MULT_EXPR: case DOT_PROD_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: return 13; case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case 
NEGATE_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: case ADDR_EXPR: case FLOAT_EXPR: case NOP_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: case TARGET_EXPR: return 14; case CALL_EXPR: case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: return 15; /* Special expressions. */ case MIN_EXPR: case MAX_EXPR: case ABS_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case REDUC_MAX_EXPR: case REDUC_MIN_EXPR: case REDUC_PLUS_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: return 16; case SAVE_EXPR: case NON_LVALUE_EXPR: return op_prio (TREE_OPERAND (op, 0)); default: /* Return an arbitrarily high precedence to avoid surrounding single VAR_DECLs in ()s. */ return 9999; } } /* Return the symbol associated with operator OP. */ static const char * op_symbol_1 (enum tree_code code) { switch (code) { case MODIFY_EXPR: return "="; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return "||"; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return "&&"; case BIT_IOR_EXPR: return "|"; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: return "^"; case ADDR_EXPR: case BIT_AND_EXPR: return "&"; case ORDERED_EXPR: return "ord"; case UNORDERED_EXPR: return "unord"; case EQ_EXPR: return "=="; case UNEQ_EXPR: return "u=="; case NE_EXPR: return "!="; case LT_EXPR: return "<"; case UNLT_EXPR: return "u<"; case LE_EXPR: return "<="; case UNLE_EXPR: return "u<="; case GT_EXPR: return ">"; case UNGT_EXPR: return "u>"; case GE_EXPR: return ">="; case UNGE_EXPR: return "u>="; case LTGT_EXPR: return "<>"; case LSHIFT_EXPR: return "<<"; case RSHIFT_EXPR: return ">>"; case LROTATE_EXPR: return "r<<"; case RROTATE_EXPR: return "r>>"; case VEC_LSHIFT_EXPR: return "v<<"; case VEC_RSHIFT_EXPR: return "v>>"; case PLUS_EXPR: return "+"; case REDUC_PLUS_EXPR: return "r+"; case WIDEN_SUM_EXPR: return "w+"; case WIDEN_MULT_EXPR: return "w*"; case NEGATE_EXPR: case MINUS_EXPR: return "-"; case BIT_NOT_EXPR: return "~"; case 
TRUTH_NOT_EXPR: return "!"; case MULT_EXPR: case INDIRECT_REF: return "*"; case ALIGN_INDIRECT_REF: return "A*"; case MISALIGNED_INDIRECT_REF: return "M*"; case TRUNC_DIV_EXPR: case RDIV_EXPR: return "/"; case CEIL_DIV_EXPR: return "/[cl]"; case FLOOR_DIV_EXPR: return "/[fl]"; case ROUND_DIV_EXPR: return "/[rd]"; case EXACT_DIV_EXPR: return "/[ex]"; case TRUNC_MOD_EXPR: return "%"; case CEIL_MOD_EXPR: return "%[cl]"; case FLOOR_MOD_EXPR: return "%[fl]"; case ROUND_MOD_EXPR: return "%[rd]"; case PREDECREMENT_EXPR: return " --"; case PREINCREMENT_EXPR: return " ++"; case POSTDECREMENT_EXPR: return "-- "; case POSTINCREMENT_EXPR: return "++ "; case MAX_EXPR: return "max"; case MIN_EXPR: return "min"; default: return "<<< ??? >>>"; } } static const char * op_symbol (tree op) { return op_symbol_1 (TREE_CODE (op)); } /* Prints the name of a CALL_EXPR. */ static void print_call_name (pretty_printer *buffer, tree node) { tree op0; gcc_assert (TREE_CODE (node) == CALL_EXPR); op0 = TREE_OPERAND (node, 0); if (TREE_CODE (op0) == NON_LVALUE_EXPR) op0 = TREE_OPERAND (op0, 0); switch (TREE_CODE (op0)) { case VAR_DECL: case PARM_DECL: dump_function_name (buffer, op0); break; case ADDR_EXPR: case INDIRECT_REF: case NOP_EXPR: dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); break; case COND_EXPR: pp_string (buffer, "("); dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); pp_string (buffer, ") ? "); dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, 0, false); pp_string (buffer, " : "); dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, 0, false); break; case COMPONENT_REF: /* The function is a pointer contained in a structure. */ if (TREE_CODE (TREE_OPERAND (op0, 0)) == INDIRECT_REF || TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) dump_function_name (buffer, TREE_OPERAND (op0, 1)); else dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); /* else We can have several levels of structures and a function pointer inside. 
This is not implemented yet... */ /* NIY;*/ break; case ARRAY_REF: if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) dump_function_name (buffer, TREE_OPERAND (op0, 0)); else dump_generic_node (buffer, op0, 0, 0, false); break; case SSA_NAME: case OBJ_TYPE_REF: dump_generic_node (buffer, op0, 0, 0, false); break; default: NIY; } } /* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ... */ static void pretty_print_string (pretty_printer *buffer, const char *str) { if (str == NULL) return; while (*str) { switch (str[0]) { case '\b': pp_string (buffer, "\\b"); break; case '\f': pp_string (buffer, "\\f"); break; case '\n': pp_string (buffer, "\\n"); break; case '\r': pp_string (buffer, "\\r"); break; case '\t': pp_string (buffer, "\\t"); break; case '\v': pp_string (buffer, "\\v"); break; case '\\': pp_string (buffer, "\\\\"); break; case '\"': pp_string (buffer, "\\\""); break; case '\'': pp_string (buffer, "\\'"); break; /* No need to handle \0; the loop terminates on \0. 
*/ case '\1': pp_string (buffer, "\\1"); break; case '\2': pp_string (buffer, "\\2"); break; case '\3': pp_string (buffer, "\\3"); break; case '\4': pp_string (buffer, "\\4"); break; case '\5': pp_string (buffer, "\\5"); break; case '\6': pp_string (buffer, "\\6"); break; case '\7': pp_string (buffer, "\\7"); break; default: pp_character (buffer, str[0]); break; } str++; } } static void maybe_init_pretty_print (FILE *file) { if (!initialized) { pp_construct (&buffer, /* prefix */NULL, /* line-width */0); pp_needs_newline (&buffer) = true; initialized = 1; } buffer.buffer->stream = file; } static void newline_and_indent (pretty_printer *buffer, int spc) { pp_newline (buffer); INDENT (spc); } static void dump_vops (pretty_printer *buffer, tree stmt, int spc, int flags) { tree use; use_operand_p use_p; def_operand_p def_p; use_operand_p kill_p; ssa_op_iter iter; if (!ssa_operands_active ()) return; FOR_EACH_SSA_MAYDEF_OPERAND (def_p, use_p, stmt, iter) { pp_string (buffer, "# "); dump_generic_node (buffer, DEF_FROM_PTR (def_p), spc + 2, flags, false); pp_string (buffer, " = V_MAY_DEF <"); dump_generic_node (buffer, USE_FROM_PTR (use_p), spc + 2, flags, false); pp_string (buffer, ">;"); newline_and_indent (buffer, spc); } FOR_EACH_SSA_MUSTDEF_OPERAND (def_p, kill_p, stmt, iter) { pp_string (buffer, "# "); dump_generic_node (buffer, DEF_FROM_PTR (def_p), spc + 2, flags, false); pp_string (buffer, " = V_MUST_DEF <"); dump_generic_node (buffer, USE_FROM_PTR (kill_p), spc + 2, flags, false); pp_string (buffer, ">;"); newline_and_indent (buffer, spc); } FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_VUSE) { pp_string (buffer, "# VUSE <"); dump_generic_node (buffer, use, spc + 2, flags, false); pp_string (buffer, ">;"); newline_and_indent (buffer, spc); } } /* Dumps basic block BB to FILE with details described by FLAGS and indented by INDENT spaces. 
*/

/* Dump basic block BB to FILE with details described by FLAGS and
   indented by INDENT spaces.  Public entry point: lazily initializes the
   shared pretty-printer, delegates to dump_generic_bb_buff, then flushes
   the buffered text to FILE.  */

void
dump_generic_bb (FILE *file, basic_block bb, int indent, int flags)
{
  maybe_init_pretty_print (file);
  dump_generic_bb_buff (&buffer, bb, indent, flags);
  pp_flush (&buffer);
}

/* Dumps header of basic block BB to buffer BUFFER indented by INDENT spaces
   and details described by flags.  With TDF_BLOCKS, emits the verbose
   "# BLOCK n" header (profile counts, source line, predecessor list);
   otherwise emits only a "<bb n>:" label, and only when the block does not
   already begin with a user-visible LABEL_EXPR.  */

static void
dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  edge e;
  tree stmt;
  edge_iterator ei;

  if (flags & TDF_BLOCKS)
    {
      INDENT (indent);
      pp_string (buffer, "# BLOCK ");
      pp_decimal_int (buffer, bb->index);

      /* Profile data, when available for this block.  */
      if (bb->frequency)
	{
	  pp_string (buffer, " freq:");
	  pp_decimal_int (buffer, bb->frequency);
	}
      if (bb->count)
	{
	  pp_string (buffer, " count:");
	  pp_widest_integer (buffer, bb->count);
	}

      if (flags & TDF_LINENO)
	{
	  /* Report the source line of the first statement that has one.  */
	  block_stmt_iterator bsi;

	  for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	    if (get_lineno (bsi_stmt (bsi)) != -1)
	      {
		pp_string (buffer, ", starting at line ");
		pp_decimal_int (buffer, get_lineno (bsi_stmt (bsi)));
		break;
	      }
	}
      newline_and_indent (buffer, indent);

      /* List the predecessor edges; TDF_SLIM prints bare block indices,
	 otherwise full edge info goes straight to the output stream.  */
      pp_string (buffer, "# PRED:");
      pp_write_text_to_stream (buffer);
      FOR_EACH_EDGE (e, ei, bb->preds)
	if (flags & TDF_SLIM)
	  {
	    pp_string (buffer, " ");
	    if (e->src == ENTRY_BLOCK_PTR)
	      pp_string (buffer, "ENTRY");
	    else
	      pp_decimal_int (buffer, e->src->index);
	  }
	else
	  dump_edge_info (buffer->buffer->stream, e, 0);
      pp_newline (buffer);
    }
  else
    {
      stmt = first_stmt (bb);
      /* Emit "<bb n>:" unless the block already starts with a label.  */
      if (!stmt || TREE_CODE (stmt) != LABEL_EXPR)
	{
	  INDENT (indent - 2);
	  pp_string (buffer, "<bb ");
	  pp_decimal_int (buffer, bb->index);
	  pp_string (buffer, ">:");
	  pp_newline (buffer);
	}
    }
  pp_write_text_to_stream (buffer);
  check_bb_profile (bb, buffer->buffer->stream);
}

/* Dumps end of basic block BB to buffer BUFFER indented by INDENT spaces.
*/

/* Emits the "# SUCC:" successor-edge line for BB; the TDF_BLOCKS
   counterpart of the "# PRED:" line printed by dump_bb_header.  */

static void
dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  edge e;
  edge_iterator ei;

  INDENT (indent);
  pp_string (buffer, "# SUCC:");
  pp_write_text_to_stream (buffer);
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (flags & TDF_SLIM)
      {
	pp_string (buffer, " ");
	if (e->dest == EXIT_BLOCK_PTR)
	  pp_string (buffer, "EXIT");
	else
	  pp_decimal_int (buffer, e->dest->index);
      }
    else
      dump_edge_info (buffer->buffer->stream, e, 1);
  pp_newline (buffer);
}

/* Dumps phi nodes of basic block BB to buffer BUFFER with details described
   by FLAGS indented by INDENT spaces.  Phis for virtual operands are shown
   only when TDF_VOPS is requested.  */

static void
dump_phi_nodes (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  tree phi = phi_nodes (bb);
  if (!phi)
    return;

  for (; phi; phi = PHI_CHAIN (phi))
    {
      if (is_gimple_reg (PHI_RESULT (phi)) || (flags & TDF_VOPS))
	{
	  INDENT (indent);
	  pp_string (buffer, "# ");
	  dump_generic_node (buffer, phi, indent, flags, false);
	  pp_newline (buffer);
	}
    }
}

/* Dump jump to basic block BB that is represented implicitly in the cfg
   to BUFFER, as "goto <bb n> (label);".  */

static void
pp_cfg_jump (pretty_printer *buffer, basic_block bb)
{
  tree stmt;

  stmt = first_stmt (bb);
  pp_string (buffer, "goto <bb ");
  pp_decimal_int (buffer, bb->index);
  pp_string (buffer, ">");
  /* Append the destination's label when the target block has one.  */
  if (stmt && TREE_CODE (stmt) == LABEL_EXPR)
    {
      pp_string (buffer, " (");
      dump_generic_node (buffer, LABEL_EXPR_LABEL (stmt), 0, 0, false);
      pp_string (buffer, ")");
    }
  pp_semicolon (buffer);
}

/* Dump edges represented implicitly in basic block BB to BUFFER, indented
   by INDENT spaces, with details given by FLAGS.  */

static void
dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent,
		     int flags)
{
  edge e;
  edge_iterator ei;

  /* If there is a fallthru edge, we may need to add an artificial goto
     to the dump.  (E is NULL after the loop when no such edge exists.)  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_FALLTHRU)
      break;
  if (e && e->dest != bb->next_bb)
    {
      INDENT (indent);

      if ((flags & TDF_LINENO)
#ifdef USE_MAPPED_LOCATION
	  && e->goto_locus != UNKNOWN_LOCATION
#else
	  && e->goto_locus
#endif
	  )
	{
	  /* Prefix the goto with its "[file : line] " source location.  */
	  expanded_location goto_xloc;
#ifdef USE_MAPPED_LOCATION
	  goto_xloc = expand_location (e->goto_locus);
#else
	  goto_xloc = *e->goto_locus;
#endif
	  pp_character (buffer, '[');
	  if (goto_xloc.file)
	    {
	      pp_string (buffer, goto_xloc.file);
	      pp_string (buffer, " : ");
	    }
	  pp_decimal_int (buffer, goto_xloc.line);
	  pp_string (buffer, "] ");
	}

      pp_cfg_jump (buffer, e->dest);
      pp_newline (buffer);
    }
}

/* Dumps basic block BB to buffer BUFFER with details described by FLAGS and
   indented by INDENT spaces: header, phi nodes, statements, implicit edges,
   and (with TDF_BLOCKS) the successor footer.  */

static void
dump_generic_bb_buff (pretty_printer *buffer, basic_block bb,
		      int indent, int flags)
{
  block_stmt_iterator bsi;
  tree stmt;
  int label_indent = indent - 2;

  if (label_indent < 0)
    label_indent = 0;

  dump_bb_header (buffer, bb, indent, flags);
  dump_phi_nodes (buffer, bb, indent, flags);

  for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
    {
      int curr_indent;

      stmt = bsi_stmt (bsi);

      /* Labels print two columns to the left of ordinary statements.  */
      curr_indent = TREE_CODE (stmt) == LABEL_EXPR ? label_indent : indent;

      INDENT (curr_indent);
      dump_generic_node (buffer, stmt, curr_indent, flags, true);
      pp_newline (buffer);
    }

  dump_implicit_edges (buffer, bb, indent, flags);

  if (flags & TDF_BLOCKS)
    dump_bb_end (buffer, bb, indent, flags);
}
GB_unaryop__minv_uint32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint32_int64
// op(A') function:  GB_tran__minv_uint32_int64

// C type:   uint32_t
// A type:   int64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

// element type of the input matrix A
#define GB_ATYPE \
    int64_t

// element type of the output matrix C
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// access the pC-th entry of C's value array
#define GB_CX(p) Cx [p]

// unary operator: GB_IMINV_UNSIGNED is presumably the 32-bit unsigned
// multiplicative ("minv") inverse defined in GB.h -- confirm there.
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise over anz entries, split statically
// across nthreads OpenMP threads.  Returns GrB_NO_VALUE when this kernel
// is compiled out via GB_DISABLE (the caller then uses the generic path).

GrB_Info GB_unop__minv_uint32_int64
(
    uint32_t *restrict Cx,          // output values
    const int64_t *restrict Ax,     // input values
    int64_t anz,                    // number of entries
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c, which expands
// using the GB_* macros defined above (phase 2 of 2).

GrB_Info GB_tran__minv_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lighting.c
#include "image.h"
#include <stdlib.h>
#include <assert.h>
#include <memory.h>
#include <kazmath/vec3.h>

// World-space extent of one image edge, used to convert pixel indices to
// world coordinates during occlusion sweeps.
static float _occlusion_scale = 1.0f;

// Set the global occlusion scale used by heman_lighting_compute_occlusion.
void heman_lighting_set_occlusion_scale(float s) { _occlusion_scale = s; }

// Build a 3-band normal map from a 1-band heightmap.  Normals come from the
// cross product of the forward differences toward (x+1, y) and (x, y+1) in
// a [0,1] x [0,1] UV space; border pixels reuse the edge texel.  The y
// component is negated (flips the map's vertical convention).
heman_image* heman_lighting_compute_normals(heman_image* heightmap)
{
    assert(heightmap->nbands == 1);
    int width = heightmap->width;
    int height = heightmap->height;
    heman_image* result = heman_image_create(width, height, 3);
    HEMAN_FLOAT invh = 1.0f / height;
    HEMAN_FLOAT invw = 1.0f / width;
    int maxx = width - 1;
    int maxy = height - 1;
    kmVec3* normals = (kmVec3*) result->data;
    int y;
#pragma omp parallel for
    for (y = 0; y < height; y++) {
        HEMAN_FLOAT v = y * invh;
        int y1 = MIN(y + 1, maxy);  // clamp so the last row differences itself
        kmVec3 p;
        kmVec3 px;
        kmVec3 py;
        kmVec3* n = normals + y * width;
        for (int x = 0; x < width; x++, n++) {
            HEMAN_FLOAT u = x * invw;
            int x1 = MIN(x + 1, maxx);
            p.x = u;
            p.y = v;
            p.z = *heman_image_texel(heightmap, x, y);
            px.x = u + invw;
            px.y = v;
            px.z = *heman_image_texel(heightmap, x1, y);
            py.x = u;
            py.y = v + invh;
            py.z = *heman_image_texel(heightmap, x, y1);
            kmVec3Subtract(&px, &px, &p);
            kmVec3Subtract(&py, &py, &p);
            kmVec3Cross(n, &px, &py);
            kmVec3Normalize(n, n);
            n->y *= -1;
        }
    }
    return result;
}

// Produce a lit 3-band image from a heightmap.  Derives normals and ambient
// occlusion internally, then shades each pixel with a Lambertian diffuse
// term and the occlusion factor, working in linear space (de-gamma, scale,
// re-gamma; _gamma is presumably defined in image.h -- confirm).
//
//   albedo             optional 3-band base color (must match dimensions)
//   occlusion          strength of the ambient-occlusion darkening [0..1]
//   diffuse            strength of the diffuse term [0..1]
//   diffuse_softening  lerps normals toward +Z before shading
//   light_position     3 floats, or NULL for the default {-0.5, 0.5, 1}
heman_image* heman_lighting_apply(heman_image* heightmap, heman_image* albedo,
    float occlusion, float diffuse, float diffuse_softening,
    const float* light_position)
{
    assert(heightmap->nbands == 1);
    int width = heightmap->width;
    int height = heightmap->height;
    heman_image* final = heman_image_create(width, height, 3);
    heman_image* normals = heman_lighting_compute_normals(heightmap);
    heman_image* occ = heman_lighting_compute_occlusion(heightmap);
    if (albedo) {
        assert(albedo->nbands == 3);
        assert(albedo->width == width);
        assert(albedo->height == height);
    }
    static float default_pos[] = {-0.5f, 0.5f, 1.0f};
    if (!light_position) {
        light_position = default_pos;
    }
    kmVec3* colors = (kmVec3*) final->data;
    HEMAN_FLOAT invgamma = 1.0f / _gamma;
    kmVec3 L;
    L.x = light_position[0];
    L.y = light_position[1];
    L.z = light_position[2];
    kmVec3Normalize(&L, &L);
    int y;
#pragma omp parallel for
    for (y = 0; y < height; y++) {
        kmVec3* color = colors + y * width;
        for (int x = 0; x < width; x++, color++) {
            // NOTE(review): this softens the normal in-place, mutating the
            // temporary normals image (freed below, so callers never see it).
            kmVec3* N = (kmVec3*) heman_image_texel(normals, x, y);
            kmVec3Lerp(N, N, &KM_VEC3_POS_Z, diffuse_softening);
            HEMAN_FLOAT df =
                1 - diffuse * (1 - kmClamp(kmVec3Dot(N, &L), 0, 1));
            HEMAN_FLOAT of =
                1 - occlusion * (1 - *heman_image_texel(occ, x, y));
            if (albedo) {
                *color = *((kmVec3*) heman_image_texel(albedo, x, y));
            } else {
                color->x = color->y = color->z = 1;
            }
            // Shade in linear space: undo gamma, apply lighting, re-apply.
            color->x = pow(color->x, _gamma);
            color->y = pow(color->y, _gamma);
            color->z = pow(color->z, _gamma);
            kmVec3Scale(color, color, df * of);
            color->x = pow(color->x, invgamma);
            color->y = pow(color->y, invgamma);
            color->z = pow(color->z, invgamma);
        }
    }
    heman_image_destroy(normals);
    heman_image_destroy(occ);
    return final;
}

// Number of sweep directions used for occlusion, and each sweep's weight.
#define NUM_SCANS (16)
#define INV_SCANS (1.0f / 16.0f)

// Slope of B as seen from A: vertical rise over horizontal-plus-vertical
// chord length.  Used to keep the per-sweep horizon hull convex.
static HEMAN_FLOAT azimuth_slope(kmVec3 a, kmVec3 b)
{
    kmVec3 d;
    kmVec3Subtract(&d, &a, &b);
    HEMAN_FLOAT x = kmVec3Length(&d);
    HEMAN_FLOAT y = b.z - a.z;
    return y / x;
}

// Occlusion contribution at THISPT given its horizon point: the elevation
// of the horizon direction, mapped through atan into [0, 0.5).
static HEMAN_FLOAT compute_occlusion(kmVec3 thispt, kmVec3 horizonpt)
{
    kmVec3 direction;
    kmVec3Subtract(&direction, &horizonpt, &thispt);
    kmVec3Normalize(&direction, &direction);
    HEMAN_FLOAT dot = kmVec3Dot(&direction, &KM_VEC3_POS_Z);
    return atan(MAX(dot, 0.0f)) * TWO_OVER_PI;
}

// One sweep pass over the heightmap in direction (dx, dy), accumulating
// INV_SCANS-weighted occlusion into RESULT.  STARTPTS is caller-provided
// scratch for the sweep start coordinates (2 ints per sweep).
static void horizon_scan(
    heman_image* heightmap, heman_image* result, int* startpts, int dx, int dy)
{
    int w = heightmap->width, h = heightmap->height;
    int sx = SGN(dx), sy = SGN(dy);
    int ax = abs(dx), ay = abs(dy);

    // Generate the start positions for each sweep line. The start positions
    // occur just outside the image boundary.
    int nsweeps = ay * w + ax * h - (ax + ay - 1);
    int* p = startpts;
    for (int x = -ax; x < w - ax; x++) {
        for (int y = -ay; y < h - ay; y++) {
            if (x >= 0 && x < w && y >= 0 && y < h) {
                continue;
            }
            // Mirror the coordinate for negative sweep directions.
            *p++ = (sx < 0) ? (w - x - 1) : x;
            *p++ = (sy < 0) ? (h - y - 1) : y;
        }
    }
    assert(nsweeps == (p - startpts) / 2);

    // Compute the number of steps by doing a mock sweep.
    int pathlen = 0;
    int i = startpts[0], j = startpts[1];
    do {
        i += dx;
        j += dy;
        ++pathlen;
    } while (i >= 0 && i < w && j >= 0 && j < h);

    // Each cell in the grid has a certain width and height. These can be
    // multiplied by row / column indices to get world-space X / Y values,
    // which are in the same coordinate system as the height values.
    HEMAN_FLOAT cellw = _occlusion_scale / MAX(w, h);
    HEMAN_FLOAT cellh = _occlusion_scale / MAX(w, h);

    // Initialize a stack of candidate horizon points, one for each sweep. In a
    // serial implementation we wouldn't need to allocate this much memory, but
    // we're trying to make life easy for multithreading.
    kmVec3* hull_buffer = malloc(sizeof(kmVec3) * pathlen * nsweeps);

    // Finally, perform the actual sweeps. We're careful to touch each pixel
    // exactly once, which makes this embarrassingly threadable.
    int sweep;
#pragma omp parallel for
    for (sweep = 0; sweep < nsweeps; sweep++) {
        kmVec3* convex_hull = hull_buffer + sweep * pathlen;
        int* p = startpts + sweep * 2;
        int i = p[0];
        int j = p[1];
        kmVec3 thispt, horizonpt;
        thispt.x = i * cellw;
        thispt.y = j * cellh;
        // The start point lies just outside the image, so clamp via EDGE.
        thispt.z = *heman_image_texel(heightmap, EDGE(i, w), EDGE(j, h));
        int stack_top = 0;
        convex_hull[0] = thispt;
        i += dx, j += dy;
        while (i >= 0 && i < w && j >= 0 && j < h) {
            thispt.x = i * cellw;
            thispt.y = j * cellh;
            thispt.z = *heman_image_texel(heightmap, i, j);
            // Pop hull points that the current point can "see over".
            while (stack_top > 0) {
                HEMAN_FLOAT s1 = azimuth_slope(thispt, convex_hull[stack_top]);
                HEMAN_FLOAT s2 =
                    azimuth_slope(thispt, convex_hull[stack_top - 1]);
                if (s1 >= s2) {
                    break;
                }
                stack_top--;
            }
            horizonpt = convex_hull[stack_top++];
            assert(stack_top < pathlen);
            convex_hull[stack_top] = thispt;
            HEMAN_FLOAT occlusion = compute_occlusion(thispt, horizonpt);
            *heman_image_texel(result, i, j) += INV_SCANS * occlusion;
            i += dx;
            j += dy;
        }
    }
    free(hull_buffer);
}

// Compute a 1-band ambient-occlusion image from a 1-band heightmap using
// 16 horizon sweeps (rook, bishop, and knight directions), then invert so
// 1.0 means fully unoccluded.
heman_image* heman_lighting_compute_occlusion(heman_image* heightmap)
{
    assert(heightmap->nbands == 1);
    int width = heightmap->width;
    int height = heightmap->height;
    heman_image* result = heman_image_create(width, height, 1);
    memset(result->data, 0, sizeof(HEMAN_FLOAT) * width * height);

    // Define sixteen 2D vectors, used for the sweep directions.
    const int scans[NUM_SCANS * 2] = {
        1, 0, 0, 1, -1, 0, 0, -1,                               // Rook
        1, 1, -1, -1, 1, -1, -1, 1,                             // Bishop
        2, 1, 2, -1, -2, 1, -2, -1, 1, 2, 1, -2, -1, 2, -1, -2  // Knight
    };

    // Allocate memory that will store the starting positions of each sweep.
    // NOTE(review): 2*3*max(w,h) ints covers the worst case, nsweeps for
    // knight moves being at most 2*w + 4*h - 4 <= 6*max(w,h) coordinates.
    int* startpts = malloc(sizeof(int) * 2 * 3 * kmMax(width, height));

    // Make each sweep serially, accumulating the result.
    for (int i = 0; i < NUM_SCANS; i++) {
        int dx = scans[i * 2];
        int dy = scans[i * 2 + 1];
        horizon_scan(heightmap, result, startpts, dx, dy);
    }

    // Invert the occlusion values and make sure they are valid.
    for (int i = 0; i < width * height; i++) {
        result->data[i] = 1.0f - result->data[i];
        assert(result->data[i] >= 0.0 && result->data[i] <= 1.0f);
    }

    free(startpts);
    return result;
}
relu_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haitao@openailab.com
 */

#include "relu_kernel_arm.h"

#include <math.h>

#include <arm_neon.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Apply (leaky) ReLU to one channel of floats.
 *
 * data   points at an int holding the per-channel element count (step);
 * id     selects which step-sized slab of input/output to process;
 * slope  0 for plain ReLU (max(x, 0)), otherwise the leaky-ReLU factor
 *        applied to non-positive lanes;
 * i      unused, kept for the callback-style signature.
 *
 * Four lanes are processed per NEON iteration; the remaining 0..3
 * elements go through a scalar tail loop.  Always returns 0. */
static inline int relu_kernel(const int i, const int id, const void* data,
                              const float* input, float* output, const float slope)
{
    const int step = (( const int* )data)[0];
    const float* src = input + id * step;
    float* dst = output + id * step;

    const int vec_end = step & -4; /* largest multiple of 4 <= step */
    const float32x4_t vzero = vdupq_n_f32(0.f);

    if (slope == 0)
    {
        /* Plain ReLU: clamp negatives to zero, four at a time. */
        int n = 0;
        while (n < vec_end)
        {
            float32x4_t vx = vld1q_f32(src + n);
            vst1q_f32(dst + n, vmaxq_f32(vx, vzero));
            n += 4;
        }
        for (; n < step; n++)
        {
            dst[n] = MAX(src[n], 0.f);
        }
    }
    else
    {
        /* Leaky ReLU: keep x where x > 0, use slope * x otherwise. */
        const float32x4_t vslope = vdupq_n_f32(slope);
        int n = 0;
        while (n < vec_end)
        {
            float32x4_t vx = vld1q_f32(src + n);
            /* lane mask: all-ones where x <= 0 */
            uint32x4_t nonpos = vcleq_f32(vx, vzero);
            float32x4_t scaled = vmulq_f32(vx, vslope);
            /* bitwise select: scaled where masked, x elsewhere */
            vst1q_f32(dst + n, vbslq_f32(nonpos, scaled, vx));
            n += 4;
        }
        for (; n < step; n++)
        {
            float v = src[n];
            dst[n] = MAX(v, 0.f) + slope * MIN(v, 0.f);
        }
    }

    return 0;
}

/* Run ReLU over every channel of input_tensor into output_tensor.
 * Channels are the product of dims[0] and dims[1]; each channel holds
 * dims[2] * dims[3] floats.  num_thread is currently unused (the OpenMP
 * pragma below is commented out).  Always returns 0. */
int relu_arm_run(struct tensor* output_tensor, struct tensor* input_tensor,
                 struct relu_param* relu_param, int num_thread)
{
    float* in_base = ( float* )input_tensor->data;
    float* out_base = ( float* )output_tensor->data;
    float slope = relu_param->negative_slope;

    int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
    int chan_size = input_tensor->dims[2] * input_tensor->dims[3];

    // #pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < chan_num; c++)
    {
        int offset = c * chan_size;
        relu_kernel(0, 0, &chan_size, in_base + offset, out_base + offset, slope);
    }

    return 0;
}
GB_binop__bget_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bget_uint16 // A.*B function (eWiseMult): GB_AemultB__bget_uint16 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bget_uint16 // C+=b function (dense accum): GB_Cdense_accumb__bget_uint16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_uint16 // C=scalar+B GB_bind1st__bget_uint16 // C=scalar+B' GB_bind1st_tran__bget_uint16 // C=A+scalar GB_bind2nd__bget_uint16 // C=A'+scalar GB_bind2nd_tran__bget_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_BITGET (x, y, uint16_t, 16) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bget_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bget_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bget_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bget_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bget_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct 
*GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bget_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t bij = Bx [p] ; Cx [p] = GB_BITGET (x, bij, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bget_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; Cx [p] = GB_BITGET (aij, y, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) 
#undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ; \ } GrB_Info GB_bind1st_tran__bget_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ; \ } GrB_Info GB_bind2nd_tran__bget_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
colormap.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR M M AAA PPPP % % C O O L O O R R MM MM A A P P % % C O O L O O RRRR M M M AAAAA PPPP % % C O O L O O R R M M A A P % % CCCC OOO LLLLL OOO R R M M A A P % % % % % % MagickCore Colormap Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % We use linked-lists because splay-trees do not currently support duplicate % key / value pairs (.e.g X11 green compliance and SVG green compliance). % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/client.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageColormap() allocates an image colormap and initializes % it to a linear gray colorspace. If the image already has a colormap, % it is replaced. AcquireImageColormap() returns MagickTrue if successful, % otherwise MagickFalse if there is not enough memory. % % The format of the AcquireImageColormap method is: % % MagickBooleanType AcquireImageColormap(Image *image,const size_t colors, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colors: the number of colors in the image colormap. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  ssize_t
    i;

  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (colors > MaxColormapSize)
    {
      /* refuse over-large palettes: demote to DirectClass before throwing */
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /* at least one entry is always allocated, even when colors == 0 */
  image->colors=MagickMax(colors,1);
  if (image->colormap == (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    {
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Initialize a linear gray ramp: entry i gets i*(QuantumRange/(colors-1)),
    fully opaque.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    double
      pixel;

    GetPixelInfo(image,image->colormap+i);
    pixel=(double) (i*(QuantumRange/MagickMax(colors-1,1)));
    image->colormap[i].red=pixel;
    image->colormap[i].green=pixel;
    image->colormap[i].blue=pixel;
    image->colormap[i].alpha=(MagickRealType) OpaqueAlpha;
    image->colormap[i].alpha_trait=BlendPixelTrait;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C y c l e C o l o r m a p I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CycleColormap() displaces an image's colormap by a given number of
%  positions.  If you cycle the colormap a number of times you can produce
%  a psychodelic effect.
%
%  WARNING: this assumes an images colormap is in a well know and defined
%  order. Currently Imagemagick has no way of setting that order.
% % The format of the CycleColormapImage method is: % % MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o displace: displace the colormap this amount. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType CycleColormapImage(Image *image, const ssize_t displace,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == DirectClass) (void) SetImageType(image,PaletteType,exception); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; ssize_t index; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { index=(ssize_t) (GetPixelIndex(image,q)+displace) % image->colors; if (index < 0) index+=(ssize_t) image->colors; SetPixelIndex(image,(Quantum) index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S o r t C o l o r m a p B y I n t e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
  SortColormapByIntensity() sorts the colormap of a PseudoClass image by
%  decreasing color intensity.
%
%  The format of the SortColormapByIntensity method is:
%
%      MagickBooleanType SortColormapByIntensity(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: A pointer to an Image structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* qsort comparator: sorts by DECREASING intensity (color_2 before color_1) */
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;

  int
    intensity;

  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity=(int) GetPixelInfoIntensity((const Image *) NULL,color_2)-(int)
    GetPixelInfoIntensity((const Image *) NULL,color_1);
  return(intensity);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  ssize_t
    y;

  unsigned short
    *pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes.
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries: the alpha channel is temporarily
    repurposed to remember each entry's original position across the sort.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  /*
    Sort image colormap by decreasing color intensity.
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Update image colormap indexes to sorted colormap order: pixels[old] = new.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* remap every pixel's palette index through the old->new table */
      i=ConstrainColormapIndex(image,GetPixelIndex(image,q),exception);
      index=(Quantum) pixels[i];
      SetPixelIndex(image,index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
residualbased_predictorcorrector_velocity_bdf_scheme_turbulent.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BDF_TURBULENT_SCHEME ) #define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BDF_TURBULENT_SCHEME /* System includes */ /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "solving_strategies/schemes/scheme.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" #include "utilities/coordinate_transformation_utilities.h" #include "processes/process.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /// BDF2 time scheme for the incompressible flow problem. /** This scheme implements update operations and the calculation of the BDF coefficients for variable time step sizes. * * WARNING: this scheme assumes that the element internally implements the BDF2 scheme and is hence NOT compatible with the * elements ASGS2D, ASGS3D, VMS, MonolithicWallConditon * * the compatible element so far is * @see TwoFluidVMS * * note also that in the prediction step only the velocity, and NOT the pressure is extrapolated in time. 
*/
template<class TSparseSpace,
         class TDenseSpace //= DenseSpace<double>
         >
class ResidualBasedPredictorCorrectorBDFSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace>
{
public:
    /**@name Type Definitions */
    /*@{ */

    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorBDFSchemeTurbulent);

    typedef Scheme<TSparseSpace, TDenseSpace> BaseType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename Element::DofsVectorType DofsVectorType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef Element::GeometryType GeometryType;

    /*@} */
    /**@name Life Cycle */
    /*@{ */

    /** Constructor without a turbulence model.
     * @param DomainSize number of spatial dimensions (2 or 3).
     */
    ResidualBasedPredictorCorrectorBDFSchemeTurbulent(unsigned int DomainSize)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mRotationTool(DomainSize,DomainSize+1,SLIP) // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
    {}

    /** Constructor without a turbulence model, with a custom slip flag.
     * @param DomainSize number of spatial dimensions (2 or 3).
     * @param rSlipFlag flag used by the rotation tool to identify slip nodes.
     */
    ResidualBasedPredictorCorrectorBDFSchemeTurbulent(unsigned int DomainSize, Kratos::Flags& rSlipFlag)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mRotationTool(DomainSize,DomainSize+1,rSlipFlag) // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
    {}

    /** Constructor with a turbulence model.
     * @param DomainSize number of spatial dimensions (2 or 3).
     * @param pTurbulenceModel process executed at the start of each non-linear iteration.
     */
    ResidualBasedPredictorCorrectorBDFSchemeTurbulent(unsigned int DomainSize, Process::Pointer pTurbulenceModel)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mpTurbulenceModel(pTurbulenceModel),
          mRotationTool(DomainSize,DomainSize+1,SLIP) // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs
    {}

    /** Destructor.
*/ ~ResidualBasedPredictorCorrectorBDFSchemeTurbulent() override { } /*@} */ /**@name Operators */ /*@{ */ /** Performing the update of the solution. */ //*************************************************************************** void Update(ModelPart& r_model_part, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { KRATOS_TRY; mRotationTool.RotateVelocities(r_model_part); mpDofUpdater->UpdateDofs(rDofSet,Dv); mRotationTool.RecoverVelocities(r_model_part); KRATOS_CATCH("") } //*************************************************************************** //predicts the solution at the current step as // v = vold void Predict(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; if(Dt != 0.0 && OldDt != 0) { //estimate acceleration from velocity in the past and predict the future. 
Note that pressure is NOT predicted const ModelPart::NodesContainerType::iterator it_begin = rModelPart.NodesBegin(); array_1d<double,3> dv; //in the next loop we do for each node //vn+1 = vn + dt*(vn - vn-1)/oldDt #pragma omp parallel for private(dv) for(int i=0; i< static_cast<int>(rModelPart.Nodes().size()); i++) { ModelPart::NodesContainerType::iterator it = it_begin + i; const array_1d<double,3>& aux = it->FastGetSolutionStepValue(VELOCITY,1); noalias(dv) = aux; noalias(dv) -= it->FastGetSolutionStepValue(VELOCITY,2); array_1d<double,3>& v = it->FastGetSolutionStepValue(VELOCITY); const double dt_ratio = Dt/OldDt; if(it->IsFixed(VELOCITY_X) == false) v[0] = aux[0] + dt_ratio*dv[0]; if(it->IsFixed(VELOCITY_Y) == false) v[1] = aux[1] + dt_ratio*dv[1]; if(it->IsFixed(VELOCITY_Z) == false) v[2] = aux[2] + dt_ratio*dv[2]; //noalias(v) = aux; //noalias( v ) += () * dv; } } else { if (rModelPart.GetCommunicator().MyPID() == 0) std::cout << "predict is doing nothing since OldDt = " << OldDt << "and Dt = " << Dt << std::endl; } // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "end of prediction" << std::endl; } //*************************************************************************** /** this function is designed to be called in the builder and solver to introduce the selected time integration scheme. It "asks" the matrix needed to the element and performs the operations needed to introduce the seected time integration scheme. 
    this function calculates at the same time the contribution to the LHS
    and to the RHS of the system
    */
    void CalculateSystemContributions(Element::Pointer rCurrentElement,
                                      LocalSystemMatrixType& LHS_Contribution,
                                      LocalSystemVectorType& RHS_Contribution,
                                      Element::EquationIdVectorType& EquationId,
                                      ProcessInfo& CurrentProcessInfo) override
    {
        KRATOS_TRY

        //Initializing the non linear iteration for the current element
        (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo);

        //basic operations for the element considered
        (rCurrentElement)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo);

        // If there is a slip condition, apply it on a rotated system of coordinates
        mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());
        mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());

        KRATOS_CATCH("")
    }

    /** RHS-only counterpart of CalculateSystemContributions. */
    void Calculate_RHS_Contribution(Element::Pointer rCurrentElement,
                                    LocalSystemVectorType& RHS_Contribution,
                                    Element::EquationIdVectorType& EquationId,
                                    ProcessInfo& CurrentProcessInfo) override
    {
        //Initializing the non linear iteration for the current element
        (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo);

        //basic operations for the element considered
        (rCurrentElement)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo);

        // If there is a slip condition, apply it on a rotated system of coordinates
        mRotationTool.Rotate(RHS_Contribution,rCurrentElement->GetGeometry());
        mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement->GetGeometry());
    }

    /** functions totally analogous to the precedent but applied to
    the "condition" objects
    */
    void Condition_CalculateSystemContributions(Condition::Pointer rCurrentCondition,
                                                LocalSystemMatrixType& LHS_Contribution,
                                                LocalSystemVectorType& RHS_Contribution,
                                                Element::EquationIdVectorType& EquationId,
                                                ProcessInfo& CurrentProcessInfo) override
    {
        KRATOS_TRY

        //KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED");
        (rCurrentCondition) -> InitializeNonLinearIteration(CurrentProcessInfo);
        (rCurrentCondition)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
        (rCurrentCondition)->EquationIdVector(EquationId, CurrentProcessInfo);

        // Rotate contributions (to match coordinates for slip conditions)
        mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
        mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());

        KRATOS_CATCH("")
    }

    /** RHS-only counterpart of Condition_CalculateSystemContributions. */
    void Condition_Calculate_RHS_Contribution(Condition::Pointer rCurrentCondition,
                                              LocalSystemVectorType& RHS_Contribution,
                                              Element::EquationIdVectorType& EquationId,
                                              ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;

        //KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED");
        //Initializing the non linear iteration for the current condition
        (rCurrentCondition) -> InitializeNonLinearIteration(rCurrentProcessInfo);

        //basic operations for the element considered
        (rCurrentCondition)->CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);
        (rCurrentCondition)->EquationIdVector(EquationId,rCurrentProcessInfo);

        // Rotate contributions (to match coordinates for slip conditions)
        mRotationTool.Rotate(RHS_Contribution,rCurrentCondition->GetGeometry());
        mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition->GetGeometry());

        KRATOS_CATCH("");
    }

    //*************************************************************************************
    //*************************************************************************************

    /** Computes the BDF2 coefficients for the (possibly variable) step sizes
     * Dt and OldDt and stores them in BDF_COEFFICIENTS. Requires a solution
     * step buffer of size 3 (steps n+1, n, n-1).
     */
    void InitializeSolutionStep(ModelPart& r_model_part,
                                TSystemMatrixType& A,
                                TSystemVectorType& Dx,
                                TSystemVectorType& b) override
    {
        ProcessInfo& rCurrentProcessInfo = r_model_part.GetProcessInfo();

        if (r_model_part.GetBufferSize() != 3)
            KRATOS_THROW_ERROR(std::logic_error, "wrong buffer size. Expects 3, currently: ", r_model_part.GetBufferSize());

        //calculate the BDF coefficients
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
        if(OldDt == 0.0)
            KRATOS_THROW_ERROR(std::logic_error,"found an OldDt = 0.0 in InitializeSolutionStep","");

        double Rho = OldDt / Dt;
        double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

        Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        if(BDFcoeffs.size() != 3)
            BDFcoeffs.resize(3, false);

        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
        BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)

        Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b);
    }

    //*************************************************************************************
    //*************************************************************************************

    /** Runs the optional turbulence model once per non-linear iteration. */
    void InitializeNonLinIteration(ModelPart& r_model_part,
                                   TSystemMatrixType& A,
                                   TSystemVectorType& Dx,
                                   TSystemVectorType& b) override
    {
        KRATOS_TRY

        if (mpTurbulenceModel != 0) // If not null
            mpTurbulenceModel->Execute();

        KRATOS_CATCH("")
    }

    /** If OSS is active, recomputes the nodal orthogonal-subscale
     * projections (ADVPROJ, DIVPROJ) and normalizes them by NODAL_AREA.
     */
    void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
    {
        ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

        //if orthogonal subscales are computed
        if (CurrentProcessInfo[OSS_SWITCH] == 1.0)
        {
            KRATOS_INFO_IF("ResidualBasedPredictorCorrectorBDFSchemeTurbulent", rModelPart.GetCommunicator().MyPID() == 0)
                << "Computing OSS projections" << std::endl;

            // reset nodal projection containers before accumulation
            for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++)
            {
                noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);

                ind->FastGetSolutionStepValue(DIVPROJ) = 0.0;

                ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0;

            }//end of loop over nodes

            //loop on nodes to compute ADVPROJ   CONVPROJ NODALAREA
            array_1d<double, 3 > output;

            for (typename ModelPart::ElementsContainerType::iterator elem = rModelPart.ElementsBegin(); elem != rModelPart.ElementsEnd(); elem++)
            {
                elem->Calculate(ADVPROJ, output, CurrentProcessInfo);
            }

            // synchronize nodal accumulations across MPI partitions
            rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
            rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
            rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);

            for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++)
            {
                if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
                {
                    // guard against division by zero on orphan nodes
                    ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
                    //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************");
                }
                const double Area = ind->FastGetSolutionStepValue(NODAL_AREA);
                ind->FastGetSolutionStepValue(ADVPROJ) /= Area;
                ind->FastGetSolutionStepValue(DIVPROJ) /= Area;
            }
        }
    }

    void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
    {
        ComputeReactions(rModelPart, A, Dx, b);

        // The block below is the previous inline reaction computation, kept
        // for reference; it was superseded by the ComputeReactions() call.
        //Element::EquationIdVectorType EquationId;
        //LocalSystemVectorType RHS_Contribution;
        //LocalSystemMatrixType LHS_Contribution;
        //ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
        //
        //ModelPart::NodeIterator itnodes_begin = rModelPart.NodesBegin();
        //const int nnodes = static_cast<int>(rModelPart.Nodes().size());
        //#pragma omp parallel for firstprivate(nnodes, itnodes_begin)
        //for(int i=0; i<nnodes; i++)
        //{
        //    ModelPart::NodeIterator itNode = itnodes_begin + i;
        //    (itNode->FastGetSolutionStepValue(REACTION)).clear();
        //}
        //
        //ModelPart::ElementsContainerType::iterator itelem_begin = rModelPart.ElementsBegin();
        //const int nelems = static_cast<int>(rModelPart.Elements().size());
        //#pragma omp parallel for firstprivate(nelems, itelem_begin)
        //for(int i=0; i<nelems; i++)
        //{
        //    ModelPart::ElementsContainerType::iterator itElem = itelem_begin + i;
        //
        //    (itElem)->InitializeNonLinearIteration(CurrentProcessInfo);
        //    (itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //TODO: call CalculateRHS instead
        //    GeometryType& rGeom = (itElem)->GetGeometry();
        //    const int NumNodes = static_cast<int>(rGeom.PointsNumber());
        //    unsigned int Dimension = rGeom.WorkingSpaceDimension();
        //    unsigned int index = 0;
        //
        //    for (int i = 0; i < NumNodes; i++)
        //    {
        //        array_1d<double,3>& rReaction = rGeom[i].FastGetSolutionStepValue(REACTION);
        //        rGeom[i].SetLock();
        //        rReaction[0] -= RHS_Contribution[index++];
        //        rReaction[1] -= RHS_Contribution[index++];
        //        if (Dimension == 3) rReaction[2] -= RHS_Contribution[index++];
        //        rGeom[i].UnSetLock();
        //        index++; // skip pressure dof
        //    }
        //}
        //
        //rModelPart.GetCommunicator().AssembleCurrentData(REACTION);
        //
        //#pragma omp parallel for firstprivate(nelems, itelem_begin)
        //for(int i=0; i<nelems; i++)
        //{
        //    ModelPart::ElementsContainerType::iterator itElem = itelem_begin + i;
        //    (itElem)->FinalizeSolutionStep(CurrentProcessInfo);
        //}
    }

    /** Accumulates nodal REACTION values from the element right-hand sides. */
    virtual void ComputeReactions(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b)
    {
        Element::EquationIdVectorType EquationId;
        LocalSystemVectorType RHS_Contribution;
        LocalSystemMatrixType LHS_Contribution;
        ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

        // clear nodal reactions before accumulation
        ModelPart::NodeIterator itnodes_begin = rModelPart.NodesBegin();
        const int nnodes = static_cast<int>(rModelPart.Nodes().size());
        #pragma omp parallel for firstprivate(nnodes, itnodes_begin)
        for (int i = 0; i<nnodes; i++)
        {
            ModelPart::NodeIterator itNode = itnodes_begin + i;
            (itNode->FastGetSolutionStepValue(REACTION)).clear();
        }

        ModelPart::ElementsContainerType::iterator itelem_begin = rModelPart.ElementsBegin();
        const int nelems = static_cast<int>(rModelPart.Elements().size());
        #pragma omp parallel for firstprivate(nelems,
itelem_begin) for (int i = 0; i<nelems; i++) { ModelPart::ElementsContainerType::iterator itElem = itelem_begin + i; (itElem)->InitializeNonLinearIteration(CurrentProcessInfo); (itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //TODO: call CalculateRHS instead GeometryType& rGeom = (itElem)->GetGeometry(); const int NumNodes = static_cast<int>(rGeom.PointsNumber()); unsigned int Dimension = rGeom.WorkingSpaceDimension(); unsigned int index = 0; for (int i = 0; i < NumNodes; i++) { array_1d<double, 3>& rReaction = rGeom[i].FastGetSolutionStepValue(REACTION); rGeom[i].SetLock(); rReaction[0] -= RHS_Contribution[index++]; rReaction[1] -= RHS_Contribution[index++]; if (Dimension == 3) rReaction[2] -= RHS_Contribution[index++]; rGeom[i].UnSetLock(); index++; // skip pressure dof } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); #pragma omp parallel for firstprivate(nelems, itelem_begin) for (int i = 0; i<nelems; i++) { ModelPart::ElementsContainerType::iterator itElem = itelem_begin + i; (itElem)->FinalizeSolutionStep(CurrentProcessInfo); } } //************************************************************************************************ //************************************************************************************************ /// Free memory allocated by this object. 
void Clear() override { this->mpDofUpdater->Clear(); } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ Process::Pointer mpTurbulenceModel; CoordinateTransformationUtils<LocalSystemMatrixType, LocalSystemVectorType, double> mRotationTool; /*@} */ /**@name Protected Operators*/ /*@{ */ /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class Scheme */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BDF_TURBULENT_SCHEME defined */
mandelbrot.c
/*

  To compile:

    gcc -O3 -fopenmp -o mandelbrot mandelbrot.c -lm

  To create an image with 4096 x 4096 pixels (last argument will be used
  to set number of threads):

    ./mandelbrot 4096 4096 1

*/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

int writeMandelbrot(const char *fileName, int width, int height, float *img, int minI, int maxI);

#define MXITER 2048

/* complex number z = r + i*sqrt(-1) */
typedef struct {
  double r;
  double i;
} complex_t;

// return iterations before z = z^2 + c leaves the escape radius 2
// (|z|^2 > 4) for the given c, capped at MXITER
int testpoint(complex_t c){
  int iter;
  complex_t z;
  double temp;

  z = c;

  for(iter=0; iter<MXITER; iter++){
    /* z <- z^2 + c on real/imaginary parts */
    temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;

    if((z.r*z.r+z.i*z.i)>4.0){ // escaped
      return iter;
    }
  }

  return iter;
}

// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t dc, float *count){
  /* FIX: the original "#pragma omp for" was an orphaned worksharing construct
     with no enclosing parallel region, so the loop always ran serially;
     "parallel for" actually creates the thread team. */
#pragma omp parallel for
  for(int n=0;n<Nim;++n){
    for(int m=0;m<Nre;++m){
      complex_t c;
      c.r = cmin.r + dc.r*m;
      c.i = cmin.i + dc.i*n;
      count[m+n*Nre] = (float) testpoint(c);
    }
  }
}

int main(int argc, char **argv){

  // to create a 4096x4096 pixel image [ last argument sets the number of threads ]
  // usage: ./mandelbrot 4096 4096 32
  if(argc < 4){
    fprintf(stderr, "usage: %s Nre Nim Nthreads\n", argv[0]);
    return 1;
  }

  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[argc-1]);

  omp_set_num_threads(Nthreads);

  // storage for the iteration counts
  float *count = (float*) malloc(Nre*Nim*sizeof(float));
  if(count == NULL){
    fprintf(stderr, "failed to allocate %d x %d count array\n", Nre, Nim);
    return 1;
  }

  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam  = 0.151579;

  complex_t cmin;
  complex_t cmax;
  complex_t dc;

  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;

  //set step sizes
  dc.r = (cmax.r-cmin.r)/(Nre-1);
  dc.i = (cmax.i-cmin.i)/(Nim-1);

  /* FIX: omp_get_wtime() returns wall-clock SECONDS as a double; the original
     stored it in a clock_t (truncating) and divided by CLOCKS_PER_SEC, which
     printed a meaningless elapsed time. */
  double start = omp_get_wtime();

  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, dc, count);

  double end = omp_get_wtime();

  // print elapsed wall-clock time in seconds
  printf("elapsed = %lf\n", end - start);

  // output mandelbrot to ppm format image
  printf("Printing mandelbrot.ppm...");
  writeMandelbrot("mandelbrot.ppm", Nre, Nim, count, 0, 80);

  free(count);

  return 0;
}

/* Output data as PPM file */
void saveppm(const char *filename, unsigned char *img, int width, int height){

  /* FILE pointer */
  FILE *f;

  /* Open file for writing */
  f = fopen(filename, "wb");
  if(f == NULL){
    fprintf(stderr, "could not open %s for writing\n", filename);
    return;
  }

  /* PPM header info, including the size of the image */
  fprintf(f, "P6 %d %d %d\n", width, height, 255);

  /* Write the image data to the file - remember 3 byte per pixel */
  fwrite(img, 3, width*height, f);

  /* Make sure you close the file */
  fclose(f);
}

/* Map iteration counts to an RGB palette and write them out as a PPM image.
   Returns 0 on success (FIX: the original was declared int but fell off the
   end without returning a value). */
int writeMandelbrot(const char *fileName, int width, int height, float *img, int minI, int maxI){

  int n, m;

  unsigned char *rgb = (unsigned char*) calloc(3*width*height, sizeof(unsigned char));

  for(n=0;n<height;++n){
    for(m=0;m<width;++m){
      int id = m+n*width;
      int I = (int) (768*sqrt((double)(img[id]-minI)/(maxI-minI)));

      // change this to change palette
      if(I<256)        rgb[3*id+2] = 255-I;
      else if(I<512)   rgb[3*id+1] = 511-I;
      else if(I<768)   rgb[3*id+0] = 767-I;
      else if(I<1024)  rgb[3*id+0] = 1023-I;
      else if(I<1536)  rgb[3*id+1] = 1535-I;
      else if(I<2048)  rgb[3*id+2] = 2047-I;
    }
  }

  saveppm(fileName, rgb, width, height);

  free(rgb);

  return 0;
}
6558.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization.
   Fills A with the deterministic pattern (i + j) / nj so that runs are
   reproducible across implementations. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
	if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel: a fixed 3x3 stencil (2-d convolution) applied
   to the interior of A, writing into B; the one-cell border of B is left
   untouched.  The whole function will be timed, including the call and
   return.
   NOTE(review): the offload pragma sits on the *inner* loop, so one target
   kernel is launched per row i (NI-2 launches in total).  Hoisting the
   pragma to the outer loop (e.g. with collapse(2)) would avoid that --
   confirm against the rest of the suite before changing. */
static
void kernel_conv2d(int ni, int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      #pragma omp target teams distribute dist_schedule(static, 1)
      for (j = 1; j < _PB_NJ - 1; ++j)
	{
	  B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	    + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	    + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
	}
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
selection_move_generator.h
/*****************************************************************************/ // Copyright (c) 2020-2021 Yuji KOGUMA // Released under the MIT license // https://opensource.org/licenses/mit-license.php /*****************************************************************************/ #ifndef PRINTEMPS_NEIGHBORHOOD_SELECTION_MOVE_MOVE_GENERATOR_H__ #define PRINTEMPS_NEIGHBORHOOD_SELECTION_MOVE_MOVE_GENERATOR_H__ #include "abstract_move_generator.h" namespace printemps { namespace neighborhood { /*****************************************************************************/ template <class T_Variable, class T_Expression> class SelectionMoveGenerator : public AbstractMoveGenerator<T_Variable, T_Expression> { private: public: /*************************************************************************/ SelectionMoveGenerator(void) { /// nothing to do } /*************************************************************************/ virtual ~SelectionMoveGenerator(void) { /// nothing to do } /*************************************************************************/ constexpr void setup( std::vector<model_component::Variable<T_Variable, T_Expression> *> &a_VARIABLE_PTRS) { /** * "Swap" move for binary variables in selection * constraints: e.g.) selection constraint x + y + z = 1 (x, * y, z \in {0, 1}) move: {(x = 0, y = 1), (x = 0, z = 1)} * (if x = 1, y = 0, z = 0) */ /** * Setup move objects. 
*/ const int VARIABLES_SIZE = a_VARIABLE_PTRS.size(); this->m_moves.resize(VARIABLES_SIZE); this->m_flags.resize(VARIABLES_SIZE); for (auto i = 0; i < VARIABLES_SIZE; i++) { this->m_moves[i].sense = MoveSense::Selection; this->m_moves[i].related_constraint_ptrs = a_VARIABLE_PTRS[i]->selection_ptr()->related_constraint_ptrs; this->m_moves[i].is_univariable_move = false; this->m_moves[i].is_special_neighborhood_move = false; this->m_moves[i].is_available = true; this->m_moves[i].overlap_rate = 0.0; } /** * Setup move updater */ auto move_updater = // [this, a_VARIABLE_PTRS, VARIABLES_SIZE]( auto * a_moves, // auto * a_flags, // const bool a_ACCEPT_ALL, // const bool a_ACCEPT_OBJECTIVE_IMPROVABLE, // const bool a_ACCEPT_FEASIBILITY_IMPROVABLE, // [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) { #ifdef _OPENMP #pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static) #endif for (auto i = 0; i < VARIABLES_SIZE; i++) { (*a_moves)[i].alterations.clear(); (*a_moves)[i].alterations.emplace_back( a_VARIABLE_PTRS[i] ->selection_ptr() ->selected_variable_ptr, 0); (*a_moves)[i].alterations.emplace_back(a_VARIABLE_PTRS[i], 1); } const int MOVES_SIZE = a_moves->size(); #ifdef _OPENMP #pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static) #endif for (auto i = 0; i < MOVES_SIZE; i++) { (*a_flags)[i] = 1; if (neighborhood::has_fixed_variable((*a_moves)[i])) { (*a_flags)[i] = 0; continue; } if ((*a_moves)[i].alterations[0].first == (*a_moves)[i].alterations[1].first) { (*a_flags)[i] = 0; continue; } if (a_ACCEPT_ALL) { /** nothing to do */ } else { if (a_ACCEPT_OBJECTIVE_IMPROVABLE && neighborhood::has_objective_improvable_variable( (*a_moves)[i])) { continue; } if (a_ACCEPT_FEASIBILITY_IMPROVABLE && neighborhood::has_feasibility_improvable_variable( (*a_moves)[i])) { continue; } (*a_flags)[i] = 0; } } }; this->m_move_updater = move_updater; } }; } // namespace neighborhood } // namespace printemps #endif 
/*****************************************************************************/ // END /*****************************************************************************/
rvsTimeCpmlFor2dAw_openmp_mex.c
/* ======================================================================
 *
 * rvsTimeCpmlFor2dAw_mex.c
 *
 * Simulates 2-d acoustic wave reverse propagation using finite difference
 * in time domain with partial differential equation (PDE)
 *
 * This C source file is free for use in academic research.
 * All rights reserved.
 *
 *
 * Written by Lingchen Zhu (zhulingchen@gmail.com)
 * Center for Signal and Information Processing, Center for Energy & Geo Processing
 * Georgia Institute of Technology
 *
 * ====================================================================== */

#include "mex.h"
#include <omp.h>
#include "finiteDifference.h"
#include <math.h>
#include <string.h>

/* input arguments */
#define VM_IN           prhs[0]
#define DATA_IN         prhs[1]
#define DIFFORDER_IN    prhs[2]
#define BOUNDARY_IN     prhs[3]
#define DZ_IN           prhs[4]
#define DX_IN           prhs[5]
#define DT_IN           prhs[6]

/* output arguments */
#define MODEL_OUT       plhs[0]
#define SNAPSHOT_OUT    plhs[1]
/*#define TEST_OUT        plhs[2]*/ /* out argument for test */

/* the gateway routine */
/* MEX entry point.
 * Inputs : velocity model (nz x nx), receiver data (nx x nt), finite
 *          difference order, PML boundary width, and grid/time steps
 *          dz, dx, dt.
 * Outputs: MODEL_OUT    - final (padded) wavefield, (nz+2l) x (nx+2l)
 *          SNAPSHOT_OUT - wavefield snapshot per time step, nz x nx x nt
 * All arrays follow Matlab's column-major layout: element (i,j) of an
 * m x n array lives at index j*m + i. */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    /* begin of declaration */
    double *pVelocityModel, *pData, *pModel, *pSnapshot;
    double dz, dx, dt;
    int diffOrder, boundary;
    int l, i, j, t;
    mwSize nz, nx, nt;
    mwSize pDimsSnapshot[3] = {0};

    double *pCoeff, *pOldRtm, *pCurRtm, *pNewRtm;
    double *puDampLeft, *pvDampLeft, *puDampRight, *pvDampRight, *puDampDown, *pvDampDown;
    double *pxDampLeft, *pxDampRight, *pxDamp, *pxb, *pzDampDown, *pzDamp, *pzb;
    double *pVdtSq;
    double *pSource;
    double *pzPhi, *pxPhi, *pzA, *pxA, *pzPsi, *pxPsi, *pzP, *pxP;
    double *pCurRtm_diffIn_zPhi, *pCurRtm_diffOut_zPhi, *pCurRtm_diffIn_xPhi, *pCurRtm_diffOut_xPhi;
    double *pCurRtm_diffIn_zA, *pCurRtm_diffOut_zA, *pCurRtm_diffIn_xA, *pCurRtm_diffOut_xA;
    double *pzA_diffIn, *pzA_diffOut, *pxA_diffIn, *pxA_diffOut;
    /* end of declaration */

    if (nrhs < 7)
        mexErrMsgTxt("All 7 input arguments shall be provided!");

    /* ATTENTION: mxGetPr might just produce a 1D array that is linearized
     * according to Matlab convention (column order) */
    pVelocityModel = mxGetPr(VM_IN);
    pData = mxGetPr(DATA_IN);
    diffOrder = *mxGetPr(DIFFORDER_IN);
    boundary = *mxGetPr(BOUNDARY_IN);
    dz = *mxGetPr(DZ_IN);
    dx = *mxGetPr(DX_IN);
    dt = *mxGetPr(DT_IN);

    nz = mxGetM(VM_IN);
    nx = mxGetN(VM_IN);
    mxAssert(nx == mxGetM(DATA_IN), "Velocity model and input data should have the same x-axis grids!");
    nt = mxGetN(DATA_IN);

    /* initialize storage */
    pDimsSnapshot[0] = nz;
    pDimsSnapshot[1] = nx;
    pDimsSnapshot[2] = nt;
    SNAPSHOT_OUT = mxCreateNumericArray(3, pDimsSnapshot, mxDOUBLE_CLASS, mxREAL);
    pSnapshot = mxGetPr(SNAPSHOT_OUT);

    /* finite difference coefficients; l is the stencil padding width */
    pCoeff = dCoef(diffOrder, "s");
    l = 2 * diffOrder - 1;

    /* damp profile of x-axis (PML absorbing layers on the left/right edges) */
    puDampLeft = (double*)mxCalloc(nz * boundary, sizeof(double));
    for (j = 0; j < boundary; j++)
        for (i = 0; i < nz; i++)
            puDampLeft[j * nz + i] = (boundary - j) * dx;
    pvDampLeft = (double*)mxCalloc(nz * boundary, sizeof(double));
    memcpy(pvDampLeft, pVelocityModel, sizeof(double) * nz * boundary);
    pxDampLeft = dampPml(puDampLeft, pvDampLeft, nz, boundary, boundary * dx);

    puDampRight = (double*)mxCalloc(nz * boundary, sizeof(double));
    for (j = 0; j < boundary; j++)
        for (i = 0; i < nz; i++)
            puDampRight[j * nz + i] = (j + 1) * dx;
    pvDampRight = (double*)mxCalloc(nz * boundary, sizeof(double));
    memcpy(pvDampRight, pVelocityModel + (nx-boundary) * nz, sizeof(double) * nz * boundary);
    pxDampRight = dampPml(puDampRight, pvDampRight, nz, boundary, boundary * dx);

    /* combine left/right damping into a full-size profile (interior stays 0) */
    pxDamp = (double*)mxCalloc(nz * nx, sizeof(double));
    memcpy(pxDamp, pxDampLeft, sizeof(double) * nz * boundary);
    memcpy(pxDamp + (nx-boundary) * nz, pxDampRight, sizeof(double) * nz * boundary);

    /* per-cell exponential attenuation factor in x */
    pxb = (double*)mxCalloc(nz * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for (i = 0; i < nz; i++)
            pxb[j * nz + i] = exp(-pxDamp[j * nz + i] * dt);

    /* damp profile of z-axis (PML layer on the bottom edge only) */
    puDampDown = (double*)mxCalloc(boundary * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for(i = 0; i < boundary; i++)
            puDampDown[j * boundary + i] = (i + 1) * dz;
    pvDampDown = (double*)mxCalloc(boundary * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for(i = 0; i < boundary; i++)
            pvDampDown[j * boundary + i] = pVelocityModel[j * nz + (nz - boundary + i)];
    pzDampDown = dampPml(puDampDown, pvDampDown, boundary, nx, boundary * dz);

    pzDamp = (double*)mxCalloc(nz * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for (i = nz-boundary; i < nz; i++)
            pzDamp[j * nz + i] = pzDampDown[j * boundary + i-(nz-boundary)];

    /* per-cell exponential attenuation factor in z */
    pzb = (double*)mxCalloc(nz * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for (i = 0; i < nz; i++)
            pzb[j * nz + i] = exp(-pzDamp[j * nz + i] * dt);

    /* ======================================================================
     * 2-D Acoustic Wave Forward-Time Modeling
     * ====================================================================== */
    /* additional arrays for storage intermediate results */
    /* rtm(:, :, 1) - oldRtm; rtm(:, :, 2) - curRtm; rtm(:, :, 3) - newRtm */
    pOldRtm = (double*)mxCalloc((nz+2*l) * (nx+2*l), sizeof(double));
    pCurRtm = (double*)mxCalloc((nz+2*l) * (nx+2*l), sizeof(double));
    pNewRtm = (double*)mxCalloc((nz+2*l) * (nx+2*l), sizeof(double));
    pzPhi = (double*)mxCalloc((nz+2*l) * nx, sizeof(double));
    pxPhi = (double*)mxCalloc(nz * (nx+2*l), sizeof(double));
    pzA = (double*)mxCalloc((nz+2*l) * nx, sizeof(double));
    pxA = (double*)mxCalloc(nz * (nx+2*l), sizeof(double));
    pzPsi = (double*)mxCalloc((nz+l) * nx, sizeof(double));
    pxPsi = (double*)mxCalloc(nz * (nx+l), sizeof(double));
    pzP = (double*)mxCalloc((nz+l) * nx, sizeof(double));
    pxP = (double*)mxCalloc(nz * (nx+l), sizeof(double));

    /* (v*dt)^2, the scale factor of the spatial terms in the update rule */
    pVdtSq = (double*)mxCalloc(nz * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for (i = 0; i < nz; i++)
            pVdtSq[j * nz + i] = (pVelocityModel[j * nz + i] * dt) * (pVelocityModel[j * nz + i] * dt);

    /* scratch inputs for diffOperator2d (outputs are allocated by the callee
     * and freed at the bottom of the time loop) */
    pCurRtm_diffIn_zPhi = (double*)mxCalloc((nz+l) * nx, sizeof(double));
    pCurRtm_diffIn_xPhi = (double*)mxCalloc(nz * (nx+l), sizeof(double));
    pCurRtm_diffIn_zA = (double*)mxCalloc((nz+2*l) * nx, sizeof(double));
    pCurRtm_diffIn_xA = (double*)mxCalloc(nz * (nx+2*l), sizeof(double));
    pzA_diffIn = (double*)mxCalloc((nz+l) * nx, sizeof(double));
    pxA_diffIn = (double*)mxCalloc(nz * (nx+l), sizeof(double));

    /*
     * izi = l:(nz+l-1); len: nz
     * ixi = l:(nx+l-1); len: nx
     * izl = (diffOrder-1):(nz+2*l-diffOrder-1); len: nz+l
     * ixl = (diffOrder-1):(nx+2*l-diffOrder-1); len: nx+l
     */
    for (t = nt-1; t >= 0; t--) /* reverse propagation */
    {
        /* inject the recorded data at the surface (depth index 0) as source */
        /*source = zeros(nz, nx);*/
        /*source(1, :) = data(:, it).';*/
        pSource = (double*)mxCalloc(nz * nx, sizeof(double));
        for (j = 0; j < nx; j++)
            pSource[j * nz] = pData[t * nx + j];

        /* zPhi(izi, :) = zb .* zPhi(izi, :) + (zb - 1) .* diffOperator(rtm(izl+1, ixi, 2), coeff, dz, 1); */
        for (j = l; j < nx+l; j++)
            for (i = diffOrder; i < nz+2*l-diffOrder+1; i++)
                pCurRtm_diffIn_zPhi[(j - l) * (nz+l) + (i-diffOrder)] = pCurRtm[j * (nz+2*l) + i];
        pCurRtm_diffOut_zPhi = diffOperator2d(pCurRtm_diffIn_zPhi, nz+l, nx, pCoeff, diffOrder, dz, 1);

        for (j = 0; j < nx; j++)
            for (i = l; i < nz + l; i++)
                pzPhi[j * (nz+2*l) + i] = pzb[j * nz + (i - l)] * pzPhi[j * (nz+2*l) + i] +
                        (pzb[j * nz + (i - l)] - 1) * pCurRtm_diffOut_zPhi[j * nz + (i - l)];

        /* xPhi(:, ixi) = xb .* xPhi(:, ixi) + (xb - 1) .* diffOperator(rtm(izi, ixl+1, 2), coeff, dx, 2); */
        for (j = diffOrder; j < nx+2*l-diffOrder+1; j++)
            for (i = l; i < nz+l; i++)
                pCurRtm_diffIn_xPhi[(j-diffOrder) * nz + (i - l)] = pCurRtm[j * (nz+2*l) + i];
        pCurRtm_diffOut_xPhi = diffOperator2d(pCurRtm_diffIn_xPhi, nz, nx+l, pCoeff, diffOrder, dx, 2);

        for (j = l; j < nx + l; j++)
            for (i = 0; i < nz; i++)
                pxPhi[j * nz + i] = pxb[(j - l) * nz + i] * pxPhi[j * nz + i] +
                        (pxb[(j - l) * nz + i] - 1) * pCurRtm_diffOut_xPhi[(j - l) * nz + i];

        /* zA(izl, :) = diffOperator(rtm(:, ixi, 2), coeff, dz, 1) + zPhi(izl, :); */
        memcpy(pCurRtm_diffIn_zA, pCurRtm + l * (nz+2*l), sizeof(double) * nx * (nz+2*l));
        pCurRtm_diffOut_zA = diffOperator2d(pCurRtm_diffIn_zA, nz+2*l, nx, pCoeff, diffOrder, dz, 1);

        for (j = 0; j < nx; j++)
            for (i = diffOrder - 1; i < nz+2*l-diffOrder; i++)
                pzA[j * (nz+2*l) + i] = pCurRtm_diffOut_zA[j * (nz+l) + (i - (diffOrder - 1))] + pzPhi[j * (nz+2*l) + i];

        /* xA(:, ixl) = diffOperator(rtm(izi, :, 2), coeff, dx, 2) + xPhi(:, ixl); */
        for (j = 0; j < nx+2*l; j++)
            for (i = l; i < nz+l; i++)
                pCurRtm_diffIn_xA[j * nz + (i - l)] = pCurRtm[j * (nz+2*l) + i];
        pCurRtm_diffOut_xA = diffOperator2d(pCurRtm_diffIn_xA, nz, nx+2*l, pCoeff, diffOrder, dx, 2);

        for (j = diffOrder - 1; j < nx+2*l-diffOrder; j++)
            for (i = 0; i < nz; i++)
                pxA[j * nz + i] = pCurRtm_diffOut_xA[(j - (diffOrder - 1)) * nz + i] + pxPhi[j * nz + i];

        /* zPsi(izi, :) = zb .* zPsi(izi, :) + (zb - 1) .* diffOperator(zA(izl, :), coeff, dz, 1); */
        for (j = 0; j < nx; j++)
            for (i = diffOrder - 1; i < nz+2*l-diffOrder; i++)
                pzA_diffIn[j * (nz+l) + (i - (diffOrder - 1))] = pzA[j * (nz+2*l) + i];
        pzA_diffOut = diffOperator2d(pzA_diffIn, nz+l, nx, pCoeff, diffOrder, dz, 1);

        for (j = 0; j < nx; j++)
            for (i = l; i < nz + l; i++)
                pzPsi[j * (nz+l) + i] = pzb[j * nz + (i - l)] * pzPsi[j * (nz+l) + i] +
                        (pzb[j * nz + (i - l)] - 1) * pzA_diffOut[j * nz + (i - l)];

        /* xPsi(:, ixi) = xb .* xPsi(:, ixi) + (xb - 1) .* diffOperator(xA(:, ixl), coeff, dx, 2); */
        memcpy(pxA_diffIn, pxA + (diffOrder - 1) * nz, sizeof(double) * (nx+l) * nz);
        pxA_diffOut = diffOperator2d(pxA_diffIn, nz, nx+l, pCoeff, diffOrder, dx, 2);

        for (j = l; j < nx + l; j++)
            for (i = 0; i < nz; i++)
                pxPsi[j * nz + i] = pxb[(j - l) * nz + i] * pxPsi[j * nz + i] +
                        (pxb[(j - l) * nz + i] - 1) * pxA_diffOut[(j - l) * nz + i];

        /* zP(izi, :) = diffOperator(zA(izl, :), coeff, dz, 1) + zPsi(izi, :); */
        for (j = 0; j < nx; j++)
            for (i = l; i < nz + l; i++)
                pzP[j * (nz+l) + i] = pzA_diffOut[j * nz + (i - l)] + pzPsi[j * (nz+l) + i];

        /* xP(:, ixi) = diffOperator(xA(:, ixl), coeff, dx, 2) + xPsi(:, ixi); */
        for (j = l; j < nx + l; j++)
            for (i = 0; i < nz; i++)
                pxP[j * nz + i] = pxA_diffOut[(j - l) * nz + i] + pxPsi[j * nz + i];

        /* ======================================================================
         * One-step finite difference calculation
         * ====================================================================== */
        /* rtm(izi, ixi, 3) = vdtSq .* (zP(izi, :) + xP(:, ixi) + source) + 2 * rtm(izi, ixi, 2) - rtm(izi, ixi, 1); */
        /* leap-frog update, parallelized over columns; each (i, j) cell is
         * written exactly once so no synchronization is needed */
        #pragma omp parallel private(j, i)
        {
            #pragma omp for schedule(static, 8)
            for (j = l; j < nx + l; j++)
                for (i = l; i < nz + l; i++)
                    pNewRtm[j * (nz+2*l) + i] =
                        pVdtSq[(j - l) * nz + (i - l)] * ( pzP[(j - l) * (nz+l) + i] + pxP[j * nz + (i - l)] + pSource[(j - l) * nz + (i - l)] ) +
                        2 * pCurRtm[j * (nz+2*l) + i] - pOldRtm[j * (nz+2*l) + i];
        }

        /* update finite difference matrices */
        /* rtm(:, :, 1) = rtm(:, :, 2); */
        memcpy(pOldRtm, pCurRtm, sizeof(double) * (nz+2*l) * (nx+2*l));
        /* rtm(:, :, 2) = rtm(:, :, 3); */
        memcpy(pCurRtm, pNewRtm, sizeof(double) * (nz+2*l) * (nx+2*l));

        /* update snapshot */
        /* snapshot(:, :, it) = rtm(izi, ixi, 2); */
        for (j = 0; j < nx; j++)
            for (i = 0; i < nz; i++)
                pSnapshot[t * (nz * nx) + j * nz + i] = pCurRtm[(j + l) * (nz+2*l) + (i + l)];

        /* ATTENTION: Don't forget to free dynamic memory allocated by mxCalloc
         * function (except for output arrays), otherwise memory leak will occur.
         * pSource and the diffOperator2d outputs are re-allocated every step. */
        mxFree(pSource);
        mxFree(pCurRtm_diffOut_zPhi);
        mxFree(pCurRtm_diffOut_xPhi);
        mxFree(pCurRtm_diffOut_zA);
        mxFree(pCurRtm_diffOut_xA);
        mxFree(pzA_diffOut);
        mxFree(pxA_diffOut);
    }

    /* write out final wavefield */
    /* model = rtm(:, :, 1); */
    MODEL_OUT = mxCreateDoubleMatrix(nz+2*l, nx+2*l, mxREAL);
    pModel = mxGetPr(MODEL_OUT);
    memcpy(pModel, pOldRtm, sizeof(double) * (nz+2*l) * (nx+2*l));

    /* test begin */
    /* TEST_OUT = source; */
    /* test end */

    /* ATTENTION: Don't forget to free dynamic memory allocated by mxCalloc
     * function (except for output arrays), otherwise memory leak will occur */
    mxFree(pCoeff);
    mxFree(pOldRtm);
    mxFree(pCurRtm);
    mxFree(pNewRtm);
    mxFree(puDampLeft);
    mxFree(pvDampLeft);
    mxFree(puDampRight);
    mxFree(pvDampRight);
    mxFree(puDampDown);
    mxFree(pvDampDown);
    mxFree(pxDampLeft);
    mxFree(pxDampRight);
    mxFree(pxDamp);
    mxFree(pxb);
    mxFree(pzDampDown);
    mxFree(pzDamp);
    mxFree(pzb);
    mxFree(pVdtSq);
    mxFree(pzPhi);
    mxFree(pxPhi);
    mxFree(pzA);
    mxFree(pxA);
    mxFree(pzPsi);
    mxFree(pxPsi);
    mxFree(pzP);
    mxFree(pxP);
    mxFree(pCurRtm_diffIn_zPhi);
    mxFree(pCurRtm_diffIn_xPhi);
    mxFree(pCurRtm_diffIn_zA);
    mxFree(pCurRtm_diffIn_xA);
    mxFree(pzA_diffIn);
    mxFree(pxA_diffIn);
}
SegmentationUtil.h
/** * spaint: SegmentationUtil.h * Copyright (c) Torr Vision Group, University of Oxford, 2016. All rights reserved. */ #ifndef H_SPAINT_SEGMENTATIONUTIL #define H_SPAINT_SEGMENTATIONUTIL #include <boost/mpl/identity.hpp> #include <itmx/base/ITMImagePtrTypes.h> namespace spaint { /** * \brief This class provides utility functions for working with segmentation masks. */ class SegmentationUtil { //#################### PUBLIC STATIC MEMBER FUNCTIONS #################### public: /** * \brief Applies a binary mask to an image. * * This version of the function is needed to assist the compiler with type deduction. * * \param mask The binary mask. * \param image The image to which to apply it. * \param backgroundValue The value to use for background pixels in the masked image. * \return A masked version of the input image. */ template <typename T> static boost::shared_ptr<ORUtils::Image<T> > apply_mask(const ITMUCharImage_CPtr& mask, const boost::shared_ptr<ORUtils::Image<T> >& image, const typename boost::mpl::identity<T>::type& backgroundValue) { return apply_mask(mask, boost::shared_ptr<const ORUtils::Image<T> >(image), backgroundValue); } /** * \brief Applies a binary mask to an image. * * \param mask The binary mask. * \param image The image to which to apply it. * \param backgroundValue The value to use for background pixels in the masked image. * \return A masked version of the input image. 
*/ template <typename T> static boost::shared_ptr<ORUtils::Image<T> > apply_mask(const ITMUCharImage_CPtr& mask, const boost::shared_ptr<const ORUtils::Image<T> >& image, const typename boost::mpl::identity<T>::type& backgroundValue) { boost::shared_ptr<ORUtils::Image<T> > maskedImage(new ORUtils::Image<T>(image->noDims, true, true)); const uchar *maskPtr = mask->GetData(MEMORYDEVICE_CPU); const T *imagePtr = image->GetData(MEMORYDEVICE_CPU); T *maskedImagePtr = maskedImage->GetData(MEMORYDEVICE_CPU); int pixelCount = static_cast<int>(image->dataSize); #ifdef WITH_OPENMP #pragma omp parallel for #endif for(int i = 0; i < pixelCount; ++i) { maskedImagePtr[i] = maskPtr[i] ? imagePtr[i] : backgroundValue; } return maskedImage; } /** * \brief Inverts a binary mask. * * \param mask The mask to invert. * \return An inverted version of the mask. */ static ITMUCharImage_Ptr invert_mask(const ITMUCharImage_CPtr& mask); }; } #endif
GB_unop__identity_bool_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__(none))
// op(A') function:  GB (_unop_tran__identity_bool_bool)

// C type:   bool
// A type:   bool
// cast:     bool cij = aij
// unaryop:  cij = aij

// These GB_* macros are consumed by the shared kernel template
// (GB_unop_transpose.c) #include'd below; they specialize the generic
// template for the bool->bool identity operator.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */  \
    bool aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// NOTE(review): the generator emits this apply kernel disabled (#if 0) for
// the no-typecast identity operator -- presumably the apply reduces to a
// plain copy handled elsewhere; confirm against the Generator templates.

#if 0

GrB_Info GB (_unop_apply__(none))
(
    bool *Cx,                   // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all anz entries are present: apply the op to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop lives in the shared template GB_unop_transpose.c,
// expanded here with the macros defined above.
GrB_Info GB (_unop_tran__identity_bool_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__min_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_int16 // A.*B function (eWiseMult): GB_AemultB__min_int16 // A*D function (colscale): GB_AxD__min_int16 // D*A function (rowscale): GB_DxB__min_int16 // C+=B function (dense accum): GB_Cdense_accumB__min_int16 // C+=b function (dense accum): GB_Cdense_accumb__min_int16 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_int16 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_int16 // C=scalar+B GB_bind1st__min_int16 // C=scalar+B' GB_bind1st_tran__min_int16 // C=A+scalar GB_bind2nd__min_int16 // C=A'+scalar GB_bind2nd_tran__min_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t 
// cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IMIN (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_INT16 || GxB_NO_MIN_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B where C, A, and B are all dense (see section comment above).
void GB_Cdense_ewise3_accum__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__min_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (unreachable: the block above always returns; generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the per-task slice workspaces allocated by the add/emult templates.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__min_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__min_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__min_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from the bitmap
        int16_t bij = Bx [p] ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__min_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries absent from the bitmap
        int16_t aij = Ax [p] ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = Ax [pA] ;             \
    Cx [pC] = GB_IMIN (x, aij) ;        \
}

GrB_Info GB_bind1st_tran__min_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later template expansions
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = Ax [pA] ;             \
    Cx [pC] = GB_IMIN (aij, y) ;        \
}

GrB_Info GB_bind2nd_tran__min_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
polybench.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /* polybench.c: this file is part of PolyBench/C */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #include <sched.h> #include <math.h> #ifdef _OPENMP # include <omp.h> #endif #if defined(POLYBENCH_PAPI) # undef POLYBENCH_PAPI # include "polybench.h" # define POLYBENCH_PAPI #else # include "polybench.h" #endif /* By default, collect PAPI counters on thread 0. */ #ifndef POLYBENCH_THREAD_MONITOR # define POLYBENCH_THREAD_MONITOR 0 #endif /* Total LLC cache size. By default 32+MB.. */ #ifndef POLYBENCH_CACHE_SIZE_KB # define POLYBENCH_CACHE_SIZE_KB 32770 #endif int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR; double polybench_program_total_flops = 0; #ifdef POLYBENCH_PAPI # include <papi.h> # define POLYBENCH_MAX_NB_PAPI_COUNTERS 96 char* _polybench_papi_eventlist[] = { #include "papi_counters.list" NULL }; int polybench_papi_eventset; int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS]; long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS]; #endif /* * Allocation table, to enable inter-array padding. All data allocated * with polybench_alloc_data should be freed with polybench_free_data. * */ #define NB_INITIAL_TABLE_ENTRIES 512 struct polybench_data_ptrs { void** user_view; void** real_ptr; int nb_entries; int nb_avail_entries; }; static struct polybench_data_ptrs* _polybench_alloc_table = NULL; static size_t polybench_inter_array_padding_sz = 0; /* Timer code (gettimeofday). */ double polybench_t_start, polybench_t_end; /* Timer code (RDTSC). 
*/ unsigned long long int polybench_c_start, polybench_c_end; static double rtclock() { #if defined(POLYBENCH_TIME) || defined(POLYBENCH_GFLOPS) struct timeval Tp; int stat; stat = gettimeofday (&Tp, NULL); if (stat != 0) printf ("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); #else return 0; #endif } #ifdef POLYBENCH_CYCLE_ACCURATE_TIMER static unsigned long long int rdtsc() { unsigned long long int ret = 0; unsigned int cycles_lo; unsigned int cycles_hi; __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi)); ret = (unsigned long long int)cycles_hi << 32 | cycles_lo; return ret; } #endif void polybench_flush_cache() { int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double); double* flush = (double*) calloc (cs, sizeof(double)); int i; double tmp = 0.0; #ifdef _OPENMP #pragma omp parallel for reduction(+:tmp) private(i) #endif for (i = 0; i < cs; i++) tmp += flush[i]; assert (tmp <= 10.0); free (flush); } #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER void polybench_linux_fifo_scheduler() { /* Use FIFO scheduler to limit OS interference. Program must be run as root, and this works only for Linux kernels. */ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO); sched_setscheduler (0, SCHED_FIFO, &schedParam); } void polybench_linux_standard_scheduler() { /* Restore to standard scheduler policy. 
*/ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER); sched_setscheduler (0, SCHED_OTHER, &schedParam); } #endif #ifdef POLYBENCH_PAPI static void test_fail(char *file, int line, char *call, int retval) { char buf[128]; memset(buf, '\0', sizeof(buf)); if (retval != 0) fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line); else { fprintf (stdout,"%-40s SKIPPED\n", file); fprintf (stdout,"Line # %d\n", line); } if (retval == PAPI_ESYS) { sprintf (buf, "System error in %s", call); perror (buf); } else if (retval > 0) fprintf (stdout,"Error: %s\n", call); else if (retval == 0) fprintf (stdout,"Error: %s\n", call); else { char errstring[PAPI_MAX_STR_LEN]; // PAPI 5.4.3 has changed the API for PAPI_perror. #if defined (PAPI_VERSION) && ((PAPI_VERSION_MAJOR(PAPI_VERSION) == 5 && PAPI_VERSION_MINOR(PAPI_VERSION) >= 4) || PAPI_VERSION_MAJOR(PAPI_VERSION) > 5) fprintf (stdout, "Error in %s: %s\n", call, PAPI_strerror(retval)); #else PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN); fprintf (stdout,"Error in %s: %s\n", call, errstring); #endif } fprintf (stdout,"\n"); if (PAPI_is_initialized ()) PAPI_shutdown (); exit (1); } void polybench_papi_init() { # ifdef _OPENMP #pragma omp parallel { #pragma omp master { if (omp_get_max_threads () < polybench_papi_counters_threadid) polybench_papi_counters_threadid = omp_get_max_threads () - 1; } #pragma omp barrier if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; polybench_papi_eventset = PAPI_NULL; if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT) test_fail (__FILE__, __LINE__, "PAPI_library_init", retval); if ((retval = PAPI_create_eventset (&polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval); int k; for (k = 0; _polybench_papi_eventlist[k]; ++k) { if ((retval = PAPI_event_name_to_code (_polybench_papi_eventlist[k], &(polybench_papi_eventlist[k]))) != PAPI_OK) test_fail 
(__FILE__, __LINE__, "PAPI_event_name_to_code", retval); } polybench_papi_eventlist[k] = 0; # ifdef _OPENMP } } #pragma omp barrier # endif } void polybench_papi_close() { # ifdef _OPENMP #pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval); if (PAPI_is_initialized ()) PAPI_shutdown (); # ifdef _OPENMP } } #pragma omp barrier # endif } int polybench_papi_start_counter(int evid) { # ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache(); # endif # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval = 1; char descr[PAPI_MAX_STR_LEN]; PAPI_event_info_t evinfo; PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr); if (PAPI_add_event (polybench_papi_eventset, polybench_papi_eventlist[evid]) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_add_event", 1); if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval); if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_start", retval); # ifdef _OPENMP } } #pragma omp barrier # endif return 0; } void polybench_papi_stop_counter(int evid) { # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; long_long values[1]; values[0] = 0; if ((retval = PAPI_read (polybench_papi_eventset, &values[0])) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_read", retval); if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_stop", retval); polybench_papi_values[evid] = values[0]; if ((retval = PAPI_remove_event (polybench_papi_eventset, polybench_papi_eventlist[evid])) != PAPI_OK) test_fail (__FILE__, __LINE__, 
"PAPI_remove_event", retval); # ifdef _OPENMP } } #pragma omp barrier # endif } void polybench_papi_print() { int verbose = 0; # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num() == polybench_papi_counters_threadid) { #ifdef POLYBENCH_PAPI_VERBOSE verbose = 1; #endif if (verbose) printf ("On thread %d:\n", polybench_papi_counters_threadid); #endif int evid; for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid) { if (verbose) printf ("%s=", _polybench_papi_eventlist[evid]); printf ("%llu ", polybench_papi_values[evid]); if (verbose) printf ("\n"); } printf ("\n"); # ifdef _OPENMP } } #pragma omp barrier # endif } #endif /* ! POLYBENCH_PAPI */ void polybench_prepare_instruments() { #ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache (); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_fifo_scheduler (); #endif } void polybench_timer_start() { polybench_prepare_instruments (); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_start = rtclock (); #else polybench_c_start = rdtsc (); #endif } void polybench_timer_stop() { #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_end = rtclock (); #else polybench_c_end = rdtsc (); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_standard_scheduler (); #endif } void polybench_timer_print() { #ifdef POLYBENCH_GFLOPS if (polybench_program_total_flops == 0) { printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n"); printf ("%0.6lf\n", polybench_t_end - polybench_t_start); } else printf ("%0.2lf\n", (polybench_program_total_flops / (double)(polybench_t_end - polybench_t_start)) / 1000000000); #else # ifndef POLYBENCH_CYCLE_ACCURATE_TIMER printf ("%0.6f\n", polybench_t_end - polybench_t_start); # else printf ("%Ld\n", polybench_c_end - polybench_c_start); # endif #endif } /* * These functions are used only if the user defines a specific * inter-array padding. 
It grows a global structure, * _polybench_alloc_table, which keeps track of the data allocated via * polybench_alloc_data (on which inter-array padding is applied), so * that the original, non-shifted pointer can be recovered when * calling polybench_free_data. * */ #ifdef POLYBENCH_ENABLE_INTARRAY_PAD static void grow_alloc_table() { if (_polybench_alloc_table == NULL || (_polybench_alloc_table->nb_entries % NB_INITIAL_TABLE_ENTRIES) != 0 || _polybench_alloc_table->nb_avail_entries != 0) { /* Should never happen if the API is properly used. */ fprintf (stderr, "[ERROR] Inter-array padding requires to use polybench_alloc_data and polybench_free_data\n"); exit (1); } size_t sz = _polybench_alloc_table->nb_entries; sz += NB_INITIAL_TABLE_ENTRIES; _polybench_alloc_table->user_view = realloc (_polybench_alloc_table->user_view, sz * sizeof(void*)); assert(_polybench_alloc_table->user_view != NULL); _polybench_alloc_table->real_ptr = realloc (_polybench_alloc_table->real_ptr, sz * sizeof(void*)); assert(_polybench_alloc_table->real_ptr != NULL); _polybench_alloc_table->nb_avail_entries = NB_INITIAL_TABLE_ENTRIES; } static void* register_padded_pointer(void* ptr, size_t orig_sz, size_t padded_sz) { if (_polybench_alloc_table == NULL) { fprintf (stderr, "[ERROR] Inter-array padding requires to use polybench_alloc_data and polybench_free_data\n"); exit (1); } if (_polybench_alloc_table->nb_avail_entries == 0) grow_alloc_table (); int id = _polybench_alloc_table->nb_entries++; _polybench_alloc_table->real_ptr[id] = ptr; _polybench_alloc_table->user_view[id] = ptr + (padded_sz - orig_sz); return _polybench_alloc_table->user_view[id]; } static void free_data_from_alloc_table (void* ptr) { if (_polybench_alloc_table != NULL && _polybench_alloc_table->nb_entries > 0) { int i; for (i = 0; i < _polybench_alloc_table->nb_entries; ++i) if (_polybench_alloc_table->user_view[i] == ptr || _polybench_alloc_table->real_ptr[i] == ptr) break; if (i != _polybench_alloc_table->nb_entries) { 
free (_polybench_alloc_table->real_ptr[i]); for (; i < _polybench_alloc_table->nb_entries - 1; ++i) { _polybench_alloc_table->user_view[i] = _polybench_alloc_table->user_view[i + 1]; _polybench_alloc_table->real_ptr[i] = _polybench_alloc_table->real_ptr[i + 1]; } _polybench_alloc_table->nb_entries--; _polybench_alloc_table->nb_avail_entries++; if (_polybench_alloc_table->nb_entries == 0) { free (_polybench_alloc_table->user_view); free (_polybench_alloc_table->real_ptr); free (_polybench_alloc_table); _polybench_alloc_table = NULL; } } } } static void check_alloc_table_state() { if (_polybench_alloc_table == NULL) { _polybench_alloc_table = (struct polybench_data_ptrs*) malloc (sizeof(struct polybench_data_ptrs)); assert(_polybench_alloc_table != NULL); _polybench_alloc_table->user_view = (void**) malloc (sizeof(void*) * NB_INITIAL_TABLE_ENTRIES); assert(_polybench_alloc_table->user_view != NULL); _polybench_alloc_table->real_ptr = (void**) malloc (sizeof(void*) * NB_INITIAL_TABLE_ENTRIES); assert(_polybench_alloc_table->real_ptr != NULL); _polybench_alloc_table->nb_entries = 0; _polybench_alloc_table->nb_avail_entries = NB_INITIAL_TABLE_ENTRIES; } } #endif // !POLYBENCH_ENABLE_INTARRAY_PAD static void* xmalloc(size_t alloc_sz) { // void* ret = NULL; // /* By default, post-pad the arrays. Safe behavior, but likely useless. */ // polybench_inter_array_padding_sz += POLYBENCH_INTER_ARRAY_PADDING_FACTOR; // size_t padded_sz = alloc_sz + polybench_inter_array_padding_sz; // int err = posix_memalign (&ret, 4096, padded_sz); // if (! ret || err) // { // fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory"); // exit (1); // } // /* Safeguard: this is invoked only if polybench.c has been compiled // with inter-array padding support from polybench.h. If so, move // the starting address of the allocation and return it to the // user. The original pointer is registered in an allocation table // internal to polybench.c. 
Data must then be freed using // polybench_free_data, which will inspect the allocation table to // free the original pointer.*/ //#ifdef POLYBENCH_ENABLE_INTARRAY_PAD /* This moves the 'ret' pointer by (padded_sz - alloc_sz) positions, and registers it in the lookup table for future free using polybench_free_data. */ // ret = register_padded_pointer(ret, alloc_sz, padded_sz); //#endif // return ret; return malloc(alloc_sz); } void polybench_free_data(void* ptr) { #ifdef POLYBENCH_ENABLE_INTARRAY_PAD free_data_from_alloc_table (ptr); #else free (ptr); #endif } void* polybench_alloc_data(unsigned long long int n, int elt_size) { #ifdef POLYBENCH_ENABLE_INTARRAY_PAD check_alloc_table_state (); #endif /// FIXME: detect overflow! size_t val = n; val *= elt_size; void* ret = xmalloc (val); return ret; }
4419.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop #pragma omp parallel { #pragma omp parallel for schedule(dynamic, 8) num_threads(4) for (i = 0; i < _PB_NY; i++) y[i] = 0; #pragma omp parallel for private (j) schedule(dynamic, 8) num_threads(4) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
nr_incore.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 *
 * Incore version of non-relativistic integrals JK contraction
 * ic in CVHFic... is short for incore
 */

#include <stdlib.h>
#include <string.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cvhf.h"
#include "np_helper/np_helper.h"
#include "fblas.h"

/*
 * Naming scheme used throughout this file (documentation added on review):
 *   CVHFics<sym>_<in>_<outsym><out>(eri, dm, v, nao, ic, jc)
 * where <sym> is the permutation symmetry of the ERI buffer handed in
 * (s8, s4, s2ij, s2kl, s1), <in> names the dm indices contracted, and
 * <outsym><out> names the output indices and their storage symmetry.
 * `eri` points at the slice of integrals belonging to the fixed bra pair
 * (ic,jc); `nao` is the number of AO basis functions.  All kernels
 * accumulate (+=) into vj/vk, so callers must zero the output first.
 */

/*
 * J
 */
// s8-symmetric J kernel: accumulate vj[i,j] += eri[ij]*dm_ij for the tril
// pairs ij <= (ic,jc), plus the mirrored contribution vj[ic,jc] from dm.
// dm_ij folds dm[ic,jc]+dm[jc,ic] (off-diagonal counted twice by symmetry).
void CVHFics8_ij_s2kl_o0(double *eri, double *dm, double *vj,
                         int nao, int ic, int jc)
{
        int i, j, ij;
        double dm_ij;
        double vj_ij = 0;
        if (ic > jc) {
                dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic];
        } else if (ic == jc) {
                dm_ij = dm[ic*nao+ic];
        } else {
                return;  // only tril pairs (ic >= jc) carry data under s8
        }
        // rows i < ic: full tril row contributes to both vj_ij and vj[i,j]
        for (i = 0, ij = 0; i < ic; i++) {
                for (j = 0; j < i; j++, ij++) {
                        vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]);
                        vj[i*nao+j] += eri[ij] * dm_ij;
                }
                // diagonal element (i,i): dm counted once
                vj_ij += eri[ij] * dm[i*nao+i];
                vj[i*nao+i] += eri[ij] * dm_ij;
                ij++;
        }
        // i == ic
        for (j = 0; j < jc; j++, ij++) {
                vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]);
                vj[i*nao+j] += eri[ij] * dm_ij;
        }
        // final pair (ic,jc) itself
        vj_ij += eri[ij] * dm_ij;
        vj[ic*nao+jc] += vj_ij;
}

// s4-symmetric J kernel: scatter dm_ij over the whole tril eri block.
void CVHFics4_ij_s2kl_o0(double *eri, double *dm, double *vj,
                         int nao, int ic, int jc)
{
        int i, j, ij;
        double dm_ij;
        if (ic > jc) {
                dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic];
        } else if (ic == jc) {
                dm_ij = dm[ic*nao+ic];
        } else {
                return;
        }
        for (i = 0, ij = 0; i < nao; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        vj[i*nao+j] += eri[ij] * dm_ij;
                }
        }
}

// Gather kernel: vj[ic,jc] += sum_kl eri[kl]*dm[kl] over the tril block,
// symmetrizing dm for off-diagonal (k,l).
void CVHFics2kl_kl_s1ij_o0(double *eri, double *dm, double *vj,
                           int nao, int ic, int jc)
{
        int i, j, ij;
        double vj_ij = 0;
        for (i = 0, ij = 0; i < nao; i++) {
                for (j = 0; j < i; j++, ij++) {
                        vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]);
                }
                vj_ij += eri[ij] * dm[i*nao+i];
                ij++;
        }
        vj[ic*nao+jc] += vj_ij;
}

/*
 * K
 */
// s8-symmetric K kernel writing the full (s1) vk.  Each tril pair (k,l)
// below (ic,jc) generates up to 8 permutationally-equivalent updates;
// the boundary cases (l==k, k==ic, the pair (ic,jc) itself, and the
// ic==jc branch) are peeled out to avoid double counting.
void CVHFics8_jk_s1il_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k < ic; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                                vk[l*nao+jc] += eri[kl] * dm[k*nao+ic];
                                vk[k*nao+jc] += eri[kl] * dm[l*nao+ic];
                                vk[l*nao+ic] += eri[kl] * dm[k*nao+jc];
                                vk[k*nao+ic] += eri[kl] * dm[l*nao+jc];
                        }
                        // l == k: only 4 distinct permutations remain
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                        vk[k*nao+jc] += eri[kl] * dm[k*nao+ic];
                        vk[k*nao+ic] += eri[kl] * dm[k*nao+jc];
                        kl++;
                }
                k = ic;
                for (l = 0; l < jc; l++, kl++) { // l<k
                        vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                        vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        vk[l*nao+jc] += eri[kl] * dm[k*nao+ic];
                        vk[k*nao+jc] += eri[kl] * dm[l*nao+ic];
                        vk[l*nao+ic] += eri[kl] * dm[k*nao+jc];
                        vk[k*nao+ic] += eri[kl] * dm[l*nao+jc];
                }
                // ic = k, jc = l;
                vk[jc*nao+jc] += eri[kl] * dm[ic*nao+ic];
                vk[ic*nao+jc] += eri[kl] * dm[jc*nao+ic];
                vk[jc*nao+ic] += eri[kl] * dm[ic*nao+jc];
                vk[ic*nao+ic] += eri[kl] * dm[jc*nao+jc];
        } else if (ic == jc) {
                for (k = 0, kl = 0; k < ic; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[l*nao+ic] += eri[kl] * dm[k*nao+ic];
                                vk[k*nao+ic] += eri[kl] * dm[l*nao+ic];
                        }
                        vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
                        vk[k*nao+ic] += eri[kl] * dm[k*nao+ic];
                        kl++;
                }
                k = ic;
                for (l = 0; l < k; l++, kl++) { // l<k
                        vk[ic*nao+l] += eri[kl] * dm[ic*nao+ic];
                        vk[l*nao+ic] += eri[kl] * dm[ic*nao+ic];
                        vk[ic*nao+ic] += eri[kl] * dm[ic*nao+l];
                        vk[ic*nao+ic] += eri[kl] * dm[l*nao+ic];
                }
                // ic = jc = k = l
                vk[ic*nao+ic] += eri[kl] * dm[ic*nao+ic];
        }
}

// s8-symmetric K kernel writing only the tril (s2) half of vk.
// Walks `eri` row by row (note the pointer bumps `eri += ...` instead of
// a flat kl counter) and peels k<jc / k==jc / jc<k<ic / k==ic regions.
void CVHFics8_jk_s2il_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
        int k, l;
        //double vk_jj = 0;
        //double vk_ij = 0;
        if (ic > jc) {
                // k < jc
                for (k=0; k < jc; k++) {
                        for (l = 0; l < k; l++) {
                                vk[jc*nao+l] += eri[l] * dm[ic*nao+k];
                                vk[jc*nao+k] += eri[l] * dm[ic*nao+l];
                                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
                        }
                        // l = k
                        vk[jc*nao+k] += eri[k] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[k] * dm[jc*nao+k];
                        eri += k + 1;  // advance to next tril row of eri
                }
                // k = jc
                for (l = 0; l < k; l++) {
                        vk[jc*nao+l ] += eri[l] * dm[ic*nao+jc];
                        vk[ic*nao+l ] += eri[l] * dm[jc*nao+jc];
                        vk[jc*nao+jc] += eri[l] *(dm[ic*nao+l] + dm[l*nao+ic]);
                        vk[ic*nao+jc] += eri[l] * dm[jc*nao+l];
                }
                // l = k = jc
                vk[jc*nao+jc] += eri[l] *(dm[ic*nao+jc] + dm[jc*nao+ic]);
                vk[ic*nao+jc] += eri[l] * dm[jc*nao+jc];
                eri += k + 1;
                // k > jc
                for (k=jc+1; k < ic; k++) {
                        // l < jc
                        for (l = 0; l < jc; l++) {
                                vk[jc*nao+l] += eri[l] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
                                vk[k*nao+jc] += eri[l] * dm[l*nao+ic];
                        }
                        // l = jc
                        vk[jc*nao+jc] += eri[l] *(dm[ic*nao+k] + dm[k*nao+ic]);
                        vk[ic*nao+jc] += eri[l] * dm[jc*nao+k];
                        vk[ic*nao+k] += eri[l] * dm[jc*nao+jc];
                        vk[k*nao+jc] += eri[l] * dm[jc*nao+ic];
                        //eri += jc+1;
                        // l > jc
                        for (l = jc+1; l < k; l++) {
                                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
                                vk[l*nao+jc] += eri[l] * dm[k*nao+ic];
                                vk[k*nao+jc] += eri[l] * dm[l*nao+ic];
                        }
                        // l = k
                        vk[jc*nao+k] += eri[l] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[l] * dm[jc*nao+k];
                        vk[k*nao+jc] += eri[l] * dm[k*nao+ic];
                        eri += k + 1;
                }
                // k = ic
                for (l = 0; l < jc; l++) {
                        vk[jc*nao+l] += eri[l] * dm[ic*nao+ic];
                        vk[ic*nao+l] += eri[l] * dm[jc*nao+ic];
                        vk[ic*nao+ic] += eri[l] *(dm[jc*nao+l] + dm[l*nao+jc]);
                        vk[ic*nao+jc] += eri[l] * dm[l*nao+ic];
                }
                // ic = k, jc = l;
                vk[jc*nao+jc] += eri[l] * dm[ic*nao+ic];
                vk[ic*nao+jc] += eri[l] * dm[jc*nao+ic];
                vk[ic*nao+ic] += eri[l] * dm[jc*nao+jc];
                eri += jc + 1;
        } else if (ic == jc) {
                // diagonal pair: rows processed two at a time (manual 2x unroll)
                for (k = 0; k < ic-1; k+=2) {
                        for (l = 0; l < k; l++) {
                                vk[ic*nao+l] += eri[l] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[ic*nao+l];
                                // second row of the pair lives k+1 entries further
                                vk[ic*nao+l ] += eri[l+k+1] * dm[ic*nao+k+1];
                                vk[ic*nao+k+1] += eri[l+k+1] * dm[ic*nao+l ];
                        }
                        vk[ic*nao+k] += eri[k] * dm[ic*nao+k];
                        eri += k+1;
                        vk[ic*nao+k ] += eri[k] * dm[ic*nao+k+1];
                        vk[ic*nao+k+1] += eri[k] * dm[ic*nao+k ];
                        vk[ic*nao+k+1] += eri[k+1] * dm[ic*nao+k+1];
                        eri += k+2;
                }
                // remainder row when ic is odd
                for (; k < ic; k++) {
                        for (l = 0; l < k; l++) {
                                vk[ic*nao+l] += eri[l] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[ic*nao+l];
                        }
                        vk[ic*nao+k] += eri[k] * dm[ic*nao+k];
                        eri += k+1;
                }
                for (l = 0; l < k; l++) { // l<k
                        vk[ic*nao+l] += eri[l] * dm[ic*nao+ic];
                        vk[ic*nao+ic] += eri[l] *(dm[ic*nao+l] + dm[l*nao+ic]);
                }
                // ic = jc = k = l
                vk[ic*nao+ic] += eri[l] * dm[ic*nao+ic];
                eri += k + 1;
        }
}

// s4-symmetric K kernel, full (s1) vk output.
void CVHFics4_jk_s1il_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                        kl++;
                }
        } else if (ic == jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                        }
                        vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
                        kl++;
                }
        }
}

// Under s4 symmetry the il<-jk and jk<-il contractions coincide.
void CVHFics4_il_s1jk_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
        CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}

// s4-symmetric K kernel writing only the tril (s2) half of vk;
// splits the k range at jc and ic to keep every write in the tril part.
void CVHFics4_jk_s2il_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k <= jc; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                        kl++;
                }
                for (k = jc+1; k <= ic; k++) {
                        for (l = 0; l <= jc; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                        for (l = jc+1; l < k; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                        kl++;
                }
                for (k = ic+1; k < nao; k++) {
                        // kl re-seeded to the start of tril row k
                        for (l = 0, kl = k*(k+1)/2; l <= jc; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        }
                        for (l = jc+1; l <= ic; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        }
                }
        } else if (ic == jc) {
                for (k = 0, kl = 0; k <= ic; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                        }
                        vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
                        kl++;
                }
                for (k = ic+1; k < nao; k++) {
                        for (l = 0, kl = k*(k+1)/2; l <= ic; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                        }
                }
        }
}

// Under s4 symmetry the jk<-il (s2) contraction is the same kernel.
void CVHFics4_il_s2jk_o0(double *eri, double *dm, double *vk,
                         int nao, int ic, int jc)
{
        CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}

/*
 * einsum ijkl,ij->(s2)kl
 * 8-fold symmetry for eri: i>=j,k>=l,ij>=kl
 * input address eri of the first element for pair ij=ic*(ic+1)/2+jc
 * i.e. ~ &eri_ao[ij*(ij+1)/2]
 * dm can be non-Hermitian,
 * output vk might not be Hermitian
 *
 * NOTE all _s2kl (nrs8_, nrs4_, nrs2kl_) assumes the tril part of eri
 * being stored in C-order *contiguously*. so call CVHFunpack_nrblock2tril
 * to generate eris
 */
void CVHFics8_ij_s2kl(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
        CVHFics8_ij_s2kl_o0(eri, dm, vj, nao, ic, jc);
}

// tri_dm: fold upper triangular dm to lower triangle,
// tri_dm[i*(i+1)/2+j] = dm[i*nao+j] + dm[j*nao+i] for i > j
// BLAS-accelerated variant of CVHFics8_ij_s2kl_o0 using the pre-folded dm.
void CVHFics8_tridm_vj(double *eri, double *tri_dm, double *vj,
                       int nao, int ic, int jc)
{
        int i, j, ij;
        double dm_ijc = tri_dm[ic*(ic+1)/2+jc];
        double vj_ij = 0;
        const int INC1 = 1;
        int i1;
        for (i = 0, ij = 0; i < ic; i++) {
                i1 = i + 1;
                // row i of the tril block: dot for vj_ij, axpy for vj row
                vj_ij += ddot_(&i1, eri+ij, &INC1, tri_dm+ij, &INC1);
                daxpy_(&i1, &dm_ijc, eri+ij, &INC1, vj+i*nao, &INC1);
                ij += i1;
        }
        // i == ic
        for (j = 0; j < jc; j++, ij++) {
                vj_ij += eri[ij] * tri_dm[ij];
                vj[i*nao+j] += eri[ij] * dm_ijc;
        }
        vj_ij += eri[ij] * dm_ijc;
        vj[ic*nao+jc] += vj_ij;
}

void CVHFics8_jk_s1il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
        CVHFics8_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}

/*
 * einsum ijkl,jk->(s2)il
 * output vk should be Hermitian
 */
void CVHFics8_jk_s2il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
        CVHFics8_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}

/*
 * einsum ijkl,jk->il
 * 4-fold symmetry for eri: i>=j,k>=l
 */
void CVHFics4_jk_s1il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
        CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}

void CVHFics4_il_s1jk(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
        CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}

/*
 * output vk should be Hermitian
 */
void CVHFics4_jk_s2il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
        CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}

void CVHFics4_il_s2jk(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
        CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}

void CVHFics4_ij_s2kl(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
        CVHFics4_ij_s2kl_o0(eri, dm, vj, nao, ic, jc);
}

// Gather only for tril (ic >= jc) pairs; triu pairs carry no data under s4.
void CVHFics4_kl_s2ij(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
        if (ic >= jc) {
                CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc);
        }
}

// No-symmetry J scatter: vj += eri * dm[ic,jc] over the full nao x nao block.
void CVHFics1_ij_s1kl(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
        int i;
        double dm_ij = dm[ic*nao+jc];
        for (i = 0; i < nao*nao; i++) {
                vj[i] += eri[i] * dm_ij;
        }
}

// No-symmetry J gather: vj[ic,jc] += <eri, dm> via BLAS ddot.
void CVHFics1_kl_s1ij(double *eri, double *dm, double *vj,
                      int nao, int ic, int jc)
{
        const int INC1 = 1;
        int nn = nao * nao;
        vj[ic*nao+jc] += ddot_(&nn, eri, &INC1, dm, &INC1);
}

// No-symmetry K: vk[ic,l] += eri[k,l] * dm[jc,k]
void CVHFics1_jk_s1il(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
        int k, l, kl;
        for (k = 0, kl = 0; k < nao; k++) {
                for (l = 0; l < nao; l++, kl++) {
                        vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                }
        }
}

// No-symmetry K: vk[jc,k] += eri[k,l] * dm[ic,l]
void CVHFics1_il_s1jk(double *eri, double *dm, double *vk,
                      int nao, int ic, int jc)
{
        int k, l, kl;
        for (k = 0, kl = 0; k < nao; k++) {
                for (l = 0; l < nao; l++, kl++) {
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                }
        }
}

// s2ij J scatter: like ics1 but dm is symmetrized over (ic,jc).
void CVHFics2ij_ij_s1kl(double *eri, double *dm, double *vj,
                        int nao, int ic, int jc)
{
        int i;
        double dm_ij;
        if (ic > jc) {
                dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic];
        } else if (ic == jc) {
                dm_ij = dm[ic*nao+ic];
        } else {
                return;
        }
        for (i = 0; i < nao*nao; i++) {
                vj[i] += eri[i] * dm_ij;
        }
}

void CVHFics2ij_kl_s2ij(double *eri, double *dm, double *vj,
                        int nao, int ic, int jc)
{
        if (ic < jc) {
                return;  // only tril pairs stored under s2ij
        }
        CVHFics1_kl_s1ij(eri, dm, vj, nao, ic, jc);
}

// s2ij K: the (ic,jc) and (jc,ic) bra contributions are handled together.
void CVHFics2ij_jk_s1il(double *eri, double *dm, double *vk,
                        int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < nao; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        }
                }
        } else if (ic == jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < nao; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                        }
                }
        }
}

void CVHFics2ij_il_s1jk(double *eri, double *dm, double *vk,
                        int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < nao; l++, kl++) {
                                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                }
        } else if (ic == jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < nao; l++, kl++) {
                                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                        }
                }
        }
}

// s2kl J scatter: dm[ic,jc] over the tril ket block (no bra symmetrization).
void CVHFics2kl_ij_s2kl(double *eri, double *dm, double *vj,
                        int nao, int ic, int jc)
{
        int i, j, ij;
        double dm_ij = dm[ic*nao+jc];
        for (i = 0, ij = 0; i < nao; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        vj[i*nao+j] += eri[ij] * dm_ij;
                }
        }
}

void CVHFics2kl_kl_s1ij(double *eri, double *dm, double *vj,
                        int nao, int ic, int jc)
{
        CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc);
}

// s2kl K kernels: tril ket loop, diagonal element counted once.
void CVHFics2kl_jk_s1il(double *eri, double *dm, double *vk,
                        int nao, int ic, int jc)
{
        int k, l, kl;
        for (k = 0, kl = 0; k < nao; k++) {
                for (l = 0; l < k; l++, kl++) {
                        vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                }
                vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                kl++;
        }
}

void CVHFics2kl_il_s1jk(double *eri, double *dm, double *vk,
                        int nao, int ic, int jc)
{
        int k, l, kl;
        for (k = 0, kl = 0; k < nao; k++) {
                for (l = 0; l < k; l++, kl++) {
                        vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                }
                vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
                kl++;
        }
}

/**************************************************
 * s8   8-fold symmetry: i>=j,k>=l,ij>=kl
 * s4   4-fold symmetry: i>=j,k>=l
 * s2ij 2-fold symmetry: i>=j
 * s2kl 2-fold symmetry: k>=l
 * s1   no permutation symmetry
 **************************************************/
typedef void (*FjkPtr)(double *eri, double *dm, double *vk,
                       int nao, int ic, int jc);

// Drivers: loop over stored bra pairs, dispatch each dm to its contraction
// kernel (fjk[ic]) against the matching eri slice, accumulating into
// thread-private buffers that are reduced into vjk under a critical section.
void CVHFnrs8_incore_drv(double *eri, double **dms, double **vjk,
                         int n_dm, int nao, void (**fjk)())
{
#pragma omp parallel default(none) \
        shared(eri, dms, vjk, n_dm, nao, fjk)
{
        int i, j, ic;
        size_t ij, off;
        size_t npair = nao*(nao+1)/2;
        size_t nn = nao * nao;
        double *v_priv = calloc(nn*n_dm, sizeof(double));
        FjkPtr pf;
        double *pv;
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < npair; ij++) {
                // decode ij = i*(i+1)/2 + j; 1e-7 guards against fp rounding
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - i*(i+1)/2;
                off = ij*(ij+1)/2;  // s8: eri slice for pair ij starts at ij*(ij+1)/2
                for (ic = 0; ic < n_dm; ic++) {
                        pf = fjk[ic];
                        pv = v_priv + ic*nn;
                        (*pf)(eri+off, dms[ic], pv, nao, i, j);
                }
        }
#pragma omp critical
{
        for (ic = 0; ic < n_dm; ic++) {
                pv = vjk[ic];
                for (i = 0; i < nn; i++) {
                        pv[i] += v_priv[ic*nn+i];
                }
        }
}
        free(v_priv);
}
}

void CVHFnrs4_incore_drv(double *eri, double **dms, double **vjk,
                         int n_dm, int nao, void (**fjk)())
{
#pragma omp parallel default(none) \
        shared(eri, dms, vjk, n_dm, nao, fjk)
{
        int i, j, ic;
        size_t ij, off;
        size_t npair = nao*(nao+1)/2;
        size_t nn = nao * nao;
        double *v_priv = calloc(nn*n_dm, sizeof(double));
        FjkPtr pf;
        double *pv;
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < npair; ij++) {
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - i*(i+1)/2;
                off = ij * npair;  // s4: one full tril block per bra pair
                for (ic = 0; ic < n_dm; ic++) {
                        pf = fjk[ic];
                        pv = v_priv + ic*nn;
                        (*pf)(eri+off, dms[ic], pv, nao, i, j);
                }
        }
#pragma omp critical
{
        for (ic = 0; ic < n_dm; ic++) {
                pv = vjk[ic];
                for (i = 0; i < nn; i++) {
                        pv[i] += v_priv[ic*nn+i];
                }
        }
}
        free(v_priv);
}
}

void CVHFnrs2ij_incore_drv(double *eri, double **dms, double **vjk,
                           int n_dm, int nao, void (**fjk)())
{
#pragma omp parallel default(none) \
        shared(eri, dms, vjk, n_dm, nao, fjk)
{
        int i, j, ic;
        size_t ij, off;
        size_t npair = nao*(nao+1)/2;
        size_t nn = nao * nao;
        double *v_priv = calloc(nn*n_dm, sizeof(double));
        FjkPtr pf;
        double *pv;
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < npair; ij++) {
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - i*(i+1)/2;
                off = ij * nn;  // s2ij: one full square ket block per bra pair
                for (ic = 0; ic < n_dm; ic++) {
                        pf = fjk[ic];
                        pv = v_priv + ic*nn;
                        (*pf)(eri+off, dms[ic], pv, nao, i, j);
                }
        }
#pragma omp critical
{
        for (ic = 0; ic < n_dm; ic++) {
                pv = vjk[ic];
                for (i = 0; i < nn; i++) {
                        pv[i] += v_priv[ic*nn+i];
                }
        }
}
        free(v_priv);
}
}

void CVHFnrs2kl_incore_drv(double *eri, double **dms, double **vjk,
                           int n_dm, int nao, void (**fjk)())
{
#pragma omp parallel default(none) \
        shared(eri, dms, vjk, n_dm, nao, fjk)
{
        int i, j, ic;
        size_t ij, off;
        size_t npair = nao*(nao+1)/2;
        size_t nn = nao * nao;
        double *v_priv = calloc(nn*n_dm, sizeof(double));
        FjkPtr pf;
        double *pv;
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < nn; ij++) {
                // s2kl: bra pairs are unpacked (square); ket block is tril
                i = ij / nao;
                j = ij - i * nao;
                off = ij * npair;
                for (ic = 0; ic < n_dm; ic++) {
                        pf = fjk[ic];
                        pv = v_priv + ic*nn;
                        (*pf)(eri+off, dms[ic], pv, nao, i, j);
                }
        }
#pragma omp critical
{
        for (ic = 0; ic < n_dm; ic++) {
                pv = vjk[ic];
                for (i = 0; i < nn; i++) {
                        pv[i] += v_priv[ic*nn+i];
                }
        }
}
        free(v_priv);
}
}

void CVHFnrs1_incore_drv(double *eri, double **dms, double **vjk,
                         int n_dm, int nao, void (**fjk)())
{
#pragma omp parallel default(none) \
        shared(eri, dms, vjk, n_dm, nao, fjk)
{
        int i, j, ic;
        size_t ij, off;
        size_t nn = nao * nao;
        double *v_priv = calloc(nn*n_dm, sizeof(double));
        FjkPtr pf;
        double *pv;
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < nn; ij++) {
                i = ij / nao;
                j = ij - i * nao;
                off = ij * nn;  // s1: full square ket block per (i,j)
                for (ic = 0; ic < n_dm; ic++) {
                        pf = fjk[ic];
                        pv = v_priv + ic*nn;
                        (*pf)(eri+off, dms[ic], pv, nao, i, j);
                }
        }
#pragma omp critical
{
        for (ic = 0; ic < n_dm; ic++) {
                pv = vjk[ic];
                for (i = 0; i < nn; i++) {
                        pv[i] += v_priv[ic*nn+i];
                }
        }
}
        free(v_priv);
}
}
01_array_sum.c
/* ────────────────────────────────────────────────────────────────────────── * │ │ │ This file is part of the exercises for the Lectures on │ │ "Foundations of High Performance Computing" │ │ given at │ │ Master in HPC and │ │ Master in Data Science and Scientific Computing │ │ @ SISSA, ICTP and University of Trieste │ │ │ │ contact: luca.tornatore@inaf.it │ │ │ │ This is free software; you can redistribute it and/or modify │ │ it under the terms of the GNU General Public License as published by │ │ the Free Software Foundation; either version 3 of the License, or │ │ (at your option) any later version. │ │ This code is distributed in the hope that it will be useful, │ │ but WITHOUT ANY WARRANTY; without even the implied warranty of │ │ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the │ │ GNU General Public License for more details. │ │ │ │ You should have received a copy of the GNU General Public License │ │ along with this program. If not, see <http://www.gnu.org/licenses/> │ │ │ * ────────────────────────────────────────────────────────────────────────── */ /* * COMPILE LINE (icc): -Ofast -fno-alias -xCORE-AVX2 -xHost -fma -use-intel-optimized-headers -falign-loops -qopenmp -parallel -pthread -ipo -vec */ #if defined(__STDC__) #if (__STDC_VERSION__ >= 199901L) #define _XOPEN_SOURCE 700 #endif #endif #define _GNU_SOURCE #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #define N_default 100 #if defined(_OPENMP) #define CPU_TIME (clock_gettime(CLOCK_REALTIME, &ts), (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9) #define CPU_TIME_th \ (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &myts), (double)myts.tv_sec + (double)myts.tv_nsec * 1e-9) #else #define CPU_TIME (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts), (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9) #endif int main(int argc, char** argv) { int N = N_default; int nthreads = 1; struct timespec ts; double* array; /* 
----------------------------------------------------------------------------- * initialize * ----------------------------------------------------------------------------- */ // check whether some arg has been passed on if (argc > 1) N = atoi(*(argv + 1)); // allocate memory if ((array = (double*)malloc(N * sizeof(double))) == NULL) { printf("I'm sorry, there is not enough memory to host %lu bytes\n", (unsigned int)N * sizeof(double)); return 1; } // just give notice of what will happen and get the number of threads used #ifndef _OPENMP printf("serial summation\n"); #else #pragma omp parallel { #pragma omp master { nthreads = omp_get_num_threads(); printf("omp summation with %d threads\n", nthreads); } } #endif // initialize the array srand48(time(NULL)); for (int ii = 0; ii < N; ii++) array[ii] = (double)ii; // choose the initialization you prefer; //array[ii] = drand48(); // the first one (with integers) makes it // easy to check the result /* ----------------------------------------------------------------------------- * calculate * ----------------------------------------------------------------------------- */ double S = 0; // this will store the summation double th_avg_time = 0; // this will be the average thread runtime double th_min_time = 1e11; // this will be the min thread runtime. // contrasting the average and the min // time taken by the threads, you may // have an idea of the unbalance. double tstart = CPU_TIME; #if !defined(_OPENMP) for (int ii = 0; ii < N; ii++) // well, you may notice this implementation S += array[ii]; // is particularly inefficient anyway #else #pragma omp parallel reduction(+:th_avg_time) \ reduction(min:th_min_time) // in this region there are 2 different { // reductions: the one of runtime, which struct timespec myts; // happens in the whole parallel region; double mystart = CPU_TIME_th; // and the one on S, which takes place #pragma omp for reduction(+ : S) // in the for loop. 
for (int ii = 0; ii < N; ii++) S += array[ii]; double mytime = CPU_TIME_th - mystart; th_avg_time += mytime; th_min_time = (mytime < th_min_time) ? mytime : th_min_time; } #endif double tend = CPU_TIME; // this timer is CLOCK_REALTIME if OpenMP // is active; CLOCK_PROCESS_CPU_TIME_ID // otherwise. That is because the latter // would accounts for the whole cpu time // used by the threads under OpenMP. /* ----------------------------------------------------------------------------- * finalize * ----------------------------------------------------------------------------- */ // printf("\nSum is %g, process took <%g> of wall-clock time\n" // "<%g> sec of avg thread-time\n" // "<%g> sec of min thread-time\n", // S, tend - tstart, th_avg_time / nthreads, th_min_time); // printf("%g SUM\n\n\n" // "%g WALL\n" // "%g THAVG\n" // "%g THMIN\n", // S, tend - tstart, th_avg_time / nthreads, th_min_time); printf("%g SUM\n\n\n" "%g\n" // Wall time "%g\n" // Average thread time (single thread avg) "%g\n", // Min thread time (single thread min) S, tend - tstart, th_avg_time / nthreads, th_min_time); free(array); return 0; }
miner.c
//VF CASH - Standalone Miner - August 2019
//James William Fletcher
//
// Review notes (documentation only, code unchanged):
// - Mining loop: generate random ECC key pairs and keep those whose public
//   key, reinterpreted as five 3-vectors of uint16, has all four adjacent
//   cosine similarities below 0.18 ("sub-genesis" addresses).
// - Minted keys are appended to minted.txt / trans.txt; an optional
//   "autoclaim" mode shells out to wget to submit the claim.

#include <omp.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <stdlib.h>
#include "ecc.h"
#include "base58.h"

// reward key pair generated at startup; claims are paid to this address
uint8_t rpriv[ECC_BYTES];
uint8_t rpub[ECC_BYTES+1];
int autoclaim = 0;  // set by the "autoclaim" command-line argument

// Quake-style fast inverse-sqrt based sqrt approximation (two Newton steps).
// NOTE(review): unused by the rest of this file; gNa() calls libm sqrt().
float approx_sqrt(float n)
{
    long i;
    float x2, y;
    const float threehalfs = 1.5F;
    x2 = n * 0.5F;
    y = n;
    i = * ( long * ) &y;
    i = 0x5f3759df - ( i >> 1 );
    y = * ( float * ) &i;
    y = y * ( threehalfs - ( x2 * y * y ) );
    y = y * ( threehalfs - ( x2 * y * y ) );
    return n*y;
}

// Convert an integer reward amount to a display value (units of 1/1000 VFC).
double toDB(const uint64_t b)
{
    return (double)(b) / 1000;
}

// floor() replacement; truncates through int, so only valid while the
// argument fits in an int.
double mfloor(double i)
{
    if(i < 0)
        return (int)i - 1;
    else
        return (int)i;
}

// packed triple of uint16 read directly out of a public-key byte string
struct vec3
{
    uint16_t x,y,z;
};
typedef struct vec3 vec3;

// Cosine similarity of two vec3 (dot / (|a||b|)); returns 1 when either
// the dot product is zero or both magnitudes vanish.
double gNa(const vec3* a, const vec3* b)
{
    const double dot = ((double)(a->x) * (double)(b->x)) + ((double)(a->y) * (double)(b->y)) + (double)((a->z) * (double)(b->z));
    const double m1 = sqrt((double)((a->x) * (double)(a->x)) + (double)((a->y) * (double)(a->y)) + (double)((a->z) * (double)(a->z)));
    const double m2 = sqrt((double)((b->x) * (double)(b->x)) + (double)((b->y) * (double)(b->y)) + (double)((b->z) * (double)(b->z)));
    if((m1 == 0 && m2 == 0) || dot == 0)
        return 1;
    return dot / (m1*m2);
}

// Test whether public key `a` is a "sub-genesis" address: unpack five vec3
// from its first 30 bytes and require all four chained cosine similarities
// to be below 0.180.  Returns the (integer) reward value, or 0.
uint64_t isSubGenesisAddress(uint8_t *a)
{
    // unpack the address into five 3-vectors of uint16
    vec3 v[5];
    uint8_t *ofs = a;
    memcpy(&v[0].x, ofs, sizeof(uint16_t));
    memcpy(&v[0].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[0].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));
    ofs = ofs + (sizeof(uint16_t)*3);
    memcpy(&v[1].x, ofs, sizeof(uint16_t));
    memcpy(&v[1].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[1].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));
    ofs = ofs + (sizeof(uint16_t)*3);
    memcpy(&v[2].x, ofs, sizeof(uint16_t));
    memcpy(&v[2].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[2].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));
    ofs = ofs + (sizeof(uint16_t)*3);
    memcpy(&v[3].x, ofs, sizeof(uint16_t));
    memcpy(&v[3].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[3].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));
    ofs = ofs + (sizeof(uint16_t)*3);
    memcpy(&v[4].x, ofs, sizeof(uint16_t));
    memcpy(&v[4].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[4].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));

    // chained angle tests (note the 0-3-2-1-4 visitation order)
    const double a1 = gNa(&v[0], &v[3]);
    const double a2 = gNa(&v[3], &v[2]);
    const double a3 = gNa(&v[2], &v[1]);
    const double a4 = gNa(&v[1], &v[4]);

    const double min = 0.180;
    if(a1 < min && a2 < min && a3 < min && a4 < min)
    {
        // reward scales with how far below the threshold the average angle is
        const double at = (a1+a2+a3+a4);
        if(at <= 0)
            return 0;
        const double ra = at/4;
        const double mn = 4.166666667; // ~ 1/0.24: normalization constant
        const uint64_t rv = (uint64_t)mfloor(( 1000 + ( 10000*(1-(ra*mn)) ) )+0.5);
        printf("\nsubG: %.8f - %.8f - %.8f - %.8f - %.3f VFC < %.3f\n", a1, a2, a3, a4, toDB(rv), ra);
        return rv;
    }

    // near-miss logging: within 0.1 of the threshold on all four angles
    const double soft = 0.1;
    if(a1 < min+soft && a2 < min+soft && a3 < min+soft && a4 < min+soft)
        printf("x: %.8f - %.8f - %.8f - %.8f\n", a1, a2, a3, a4);

    return 0;
}

// Difficulty of an address: the largest of the four chained cosine
// similarities (same unpacking and ordering as isSubGenesisAddress).
double subDiff(uint8_t *a)
{
    vec3 v[5]; //Vectors
    uint8_t *ofs = a;
    memcpy(&v[0].x, ofs, sizeof(uint16_t));
    memcpy(&v[0].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[0].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));
    ofs = ofs + (sizeof(uint16_t)*3);
    memcpy(&v[1].x, ofs, sizeof(uint16_t));
    memcpy(&v[1].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[1].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));
    ofs = ofs + (sizeof(uint16_t)*3);
    memcpy(&v[2].x, ofs, sizeof(uint16_t));
    memcpy(&v[2].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[2].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));
    ofs = ofs + (sizeof(uint16_t)*3);
    memcpy(&v[3].x, ofs, sizeof(uint16_t));
    memcpy(&v[3].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[3].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));
    ofs = ofs + (sizeof(uint16_t)*3);
    memcpy(&v[4].x, ofs, sizeof(uint16_t));
    memcpy(&v[4].y, ofs + sizeof(uint16_t), sizeof(uint16_t));
    memcpy(&v[4].z, ofs + (sizeof(uint16_t)*2), sizeof(uint16_t));

    const double a1 = gNa(&v[0], &v[3]);
    const double a2 = gNa(&v[3], &v[2]);
    const double a3 = gNa(&v[2], &v[1]);
    const double a4 = gNa(&v[1], &v[4]);
    //printf("%.3f - %.3f - %.3f - %.3f\n", a1,a2,a3,a4);

    double diff = a1;
    if(a2 > diff) diff = a2;
    if(a3 > diff) diff = a3;
    if(a4 > diff) diff = a4;
    return diff;
}

// Entry point: record the reward key, then mine forever on all OpenMP
// threads.  Never returns in normal operation.
int main(int argc, char* args[])
{
    // enable auto claim?
    if(argc == 2 && strcmp(args[1], "autoclaim") == 0)
    {
        autoclaim = 1;
        // NOTE(review): this message says "rewards.txt" but the key is
        // written to "reward.txt" below — confirm which name is intended.
        printf("Auto claim will attempt to claim all minted.txt to the last key that has been appended to rewards.txt.\n");
    }

    printf("Please wait, minted keys are saved to minted.txt, mining at difficulty 0.18 ...\n");

    //Save reward addr used today
    ecc_make_key(rpub, rpriv);
    FILE* f = fopen("reward.txt", "a");
    if(f != NULL)
    {
        char bpriv[256];
        memset(bpriv, 0, sizeof(bpriv));
        size_t len = sizeof(bpriv);
        // NOTE(review): length argument is ECC_CURVE here but ECC_BYTES in
        // the mining loop below — confirm these macros agree.
        b58enc(bpriv, &len, rpriv, ECC_CURVE);
        fprintf(f, "%s\n", bpriv);
        fclose(f);
    }

    // one infinite mining loop per OpenMP thread
    #pragma omp parallel
    //#pragma omp target teams distribute parallel for
    //for(int i=0; i < 2048; ++i)
    while(1)
    {
        //i=0;
        int tid = omp_get_thread_num();
        int nthreads;  // only initialized (and read) on thread 0
        if(tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads: %d\n", nthreads);
        }

        time_t nt = time(0)+16;  // next stats report, every 16 seconds
        uint64_t c = 0;          // keys tried since last report
        while(1)
        {
            // periodic hash-rate report
            if(time(0) > nt)
            {
                if(tid == 0)
                    printf("HASH/s: %lu\n", (c*nthreads)/16); // extrapolated total
                if(c > 0)
                    printf("T-%i: %lu\n", omp_get_thread_num(), c/16);
                c = 0;
                nt = time(0)+16;
            }

            // try one fresh key pair
            uint8_t priv[ECC_BYTES];
            uint8_t pub[ECC_BYTES+1];
            ecc_make_key(pub, priv);

            uint64_t r = isSubGenesisAddress(pub);
            if(r != 0)
            {
                // base58-encode everything for logging / claiming
                char bpriv[256];
                memset(bpriv, 0, sizeof(bpriv));
                size_t len = 256;
                b58enc(bpriv, &len, priv, ECC_BYTES);

                char bpub[256];
                memset(bpub, 0, sizeof(bpub));
                len = 256;
                b58enc(bpub, &len, pub, ECC_BYTES+1);

                char brpriv[256];
                memset(brpriv, 0, sizeof(brpriv));
                len = 256;
                b58enc(brpriv, &len, rpriv, ECC_BYTES);

                char brpub[256];
                memset(brpub, 0, sizeof(brpub));
                len = 256;
                b58enc(brpub, &len, rpub, ECC_BYTES+1);

                const double diff = subDiff(pub);
                const double fr = toDB(r);

                //Log in console
                printf("Private Key: %s (%.3f DIFF) (%.3f VFC)\n\n", bpriv, diff, fr);

                //Try to claim
                // NOTE(review): shell command built with sprintf and run via
                // system(); inputs are locally-generated base58 strings, but
                // this pattern is injection-prone if inputs ever change.
                if(autoclaim == 1)
                {
                    char cmd[2048];
                    sprintf(cmd, "wget -qO- \"https://vfcash.uk/rest.php?fromprivfast=%s&frompub=%s&topub=%s&amount=%.3f\"", bpriv, bpub, brpub, fr);
                    if(system(cmd) != -1)
                        printf("\n%s\n", cmd);
                }

                //Log claim url
                FILE* f = fopen("trans.txt", "a");
                if(f != NULL)
                {
                    fprintf(f, "https://vfcash.uk/rest.php?fromprivfast=%s&frompub=%s&topub=%s&amount=%.3f\n", bpriv, bpub, brpub, fr);
                    fclose(f);
                }

                //Log in minted
                f = fopen("minted.txt", "a");
                if(f != NULL)
                {
                    fprintf(f, "%s / %.3f / %.3f\n", bpriv, diff, fr);
                    fclose(f);
                }
            }

            c++;
        }
    }

    return 0;
}
GB_unaryop__identity_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (review: comments only added below; no code changed)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint64_fp32
// op(A') function:  GB_tran__identity_uint64_fp32

// C type:   uint64_t
// A type:   float
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = aij

// macros below are consumed by the shared kernels (GB_unaryop_transpose.c)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (float -> uint64_t, saturating/clipping per GB_CAST_UNSIGNED)
#define GB_CASTING(z, x) \
    uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over anz entries; trivially parallel (static schedule).
GrB_Info GB_unop__identity_uint64_fp32
(
    uint64_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body is the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above.
GrB_Info GB_tran__identity_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cpu_code.c
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Forward declarations for the advection kernels and timing helpers. */
void advect_flow_fields_c(double*, double*, double*, double*, double*, double*,
    double*, double*, double*, double*, double, double, int, int, int, int, int, int, int);
void advect_u_flow_field_c(double*, double*, double*, double*, double*, double*,
    double, double, int, int, int, int, int, int, int);
void advect_v_flow_field_c(double*, double*, double*, double*, double*, double*,
    double, double, int, int, int, int, int, int, int);
void advect_w_flow_field_c(double*, double*, double*, double*, double*, double*,
    double, double, int, int, int, int, int, int, int);
static long getEpoch();
static double getTiming(long, long);
static long long getTotalFLOPS(int, int, int, int);

/*
 * Advect a scalar (theta) field with the same upwind-style stencil as the
 * u/v/w kernels. Arrays are laid out [i][j][k] with k fastest; the counter_*
 * variables track the flat index of the centre point and its x/y neighbours.
 *
 * NOTE(review): this kernel is serial (no OpenMP pragma) unlike its u/v/w
 * siblings, and is not called from main() below — it may be a serial
 * reference implementation; confirm before parallelising.
 *
 * NOTE(review): in all these kernels the counters advance size_z-1 times per
 * j-column while columns are size_z elements apart, so the stencil drifts by
 * one element per column. This is faithful to the original benchmark source;
 * verify against the reference implementation before relying on the
 * numerical output (the FLOP count is unaffected).
 */
void advect_th_field_c(double * sth, double * th, double * u, double * v,
    double * w, double * tzc1, double * tzc2, double cx, double cy,
    int size_x, int size_y, int size_z, int start_x, int end_x,
    int start_y, int end_y) {
  int i, j, k, counter_loc, counter_loc_xp1, counter_loc_xm1, counter_loc_yp1, counter_loc_ym1;
  for (i=start_x;i<end_x;i++) {
    counter_loc=(i * size_y * size_z) + (size_z * start_y);
    counter_loc_xp1=((i+1) * size_y * size_z) + (size_z * start_y);
    counter_loc_xm1=((i-1) * size_y * size_z) + (size_z * start_y);
    counter_loc_yp1=(i * size_y * size_z) + (size_z * (start_y+1));
    counter_loc_ym1=(i * size_y * size_z) + (size_z * (start_y-1));
    for (j=start_y;j<end_y;j++) {
      for (k=1;k<size_z;k++) {
        counter_loc++;
        counter_loc_xp1++;
        counter_loc_xm1++;
        counter_loc_yp1++;
        counter_loc_ym1++;
        sth[counter_loc]=cx*0.5*(u[counter_loc_xm1] * th[counter_loc_xm1] - u[counter_loc] * th[counter_loc_xp1]);
        sth[counter_loc]=sth[counter_loc]+cy*0.5*(v[counter_loc_ym1] * th[counter_loc_ym1] - v[counter_loc] * th[counter_loc_yp1]);
        if (k < size_z - 1) {
          /* BUG FIX: tzc2 has only size_z entries; the original indexed it
             with counter_loc (a full 3-D flat offset), reading far out of
             bounds. Index by k, matching tzc1 here and tzc2 in the u/v
             kernels. */
          sth[counter_loc]=sth[counter_loc]+2.0*(tzc1[k]*w[counter_loc-1]*th[counter_loc-1] - tzc2[k]*w[counter_loc]*th[counter_loc+1]);
        } else { // Lid
          sth[counter_loc]=sth[counter_loc]+tzc1[k]*2.0*w[counter_loc-1]*th[counter_loc-1];
        }
      }
    }
  }
}

/*
 * Benchmark driver: allocates the flow fields (with a halo of hs cells in x
 * and y), runs the three advection kernels for the requested number of
 * iterations, and reports runtime and achieved GFLOP/s (GiB-based: 1024^3).
 * Usage: <prog> size_x size_y iterations
 */
int main(int argc, char * argv[]) {
  /* Validate the command line before touching argv (the original indexed
     argv[1..3] unconditionally). */
  if (argc < 4) {
    fprintf(stderr, "Usage: %s size_x size_y iterations\n", argv[0]);
    return -1;
  }
  int size_x=atoi(argv[1]), size_y=atoi(argv[2]), iterations=atoi(argv[3]), size_z=64;
  int hs=2; /* halo size */
  int start_x=hs, end_x=size_x+hs, start_y=hs, end_y=size_y+hs;
  int field_x=size_x+(hs*2), field_y=size_y+(hs*2);
  size_t field_elems=(size_t) field_x * field_y * size_z;
  /* calloc rather than malloc: the kernels read these arrays on the first
     iteration, and reading indeterminate malloc'd memory is undefined
     behaviour and makes timings non-deterministic. Zero-fill happens before
     the timer starts, so it does not perturb the measurement. */
  double * su=(double *) calloc(field_elems, sizeof(double));
  double * sv=(double *) calloc(field_elems, sizeof(double));
  double * sw=(double *) calloc(field_elems, sizeof(double));
  double * u=(double *) calloc(field_elems, sizeof(double));
  double * v=(double *) calloc(field_elems, sizeof(double));
  double * w=(double *) calloc(field_elems, sizeof(double));
  double * tzc1=(double *) calloc(size_z, sizeof(double));
  double * tzc2=(double *) calloc(size_z, sizeof(double));
  double * tzd1=(double *) calloc(size_z, sizeof(double));
  double * tzd2=(double *) calloc(size_z, sizeof(double));
  if (su == NULL || sv == NULL || sw == NULL || u == NULL || v == NULL ||
      w == NULL || tzc1 == NULL || tzc2 == NULL || tzd1 == NULL || tzd2 == NULL) {
    fprintf(stderr, "Allocation failed\n");
    return -1;
  }
  printf("Advecting over %d threads with compute domain X=%d Y=%d Z=%d, total domain size of X=%d Y=%d Z=%d\n",
      omp_get_max_threads(), size_x, size_y, size_z, field_x, field_y, size_z);
  long start=getEpoch();
  for (int i=0;i<iterations;i++) {
    advect_flow_fields_c(su, sv, sw, u, v, w, tzc1, tzc2, tzd1, tzd2, 1.0, 2.0,
        field_x, field_y, size_z, start_x, end_x, start_y, end_y);
  }
  double overalltime=getTiming(getEpoch(), start);
  printf("Runtime is %f ms\n", overalltime);
  double kernelFLOPS=(getTotalFLOPS(size_x, size_y, size_z, iterations) / (overalltime / 1000)) / 1024 / 1024 / 1024;
  printf("Overall GFLOPS %.2f\n", kernelFLOPS);
  free(su); free(sv); free(sw);
  free(u); free(v); free(w);
  free(tzc1); free(tzc2); free(tzd1); free(tzd2);
  return 0;
}

/* Run one timestep of advection for all three velocity components. */
void advect_flow_fields_c(double * su, double * sv, double * sw, double * u,
    double * v, double * w, double * tzc1, double * tzc2, double * tzd1,
    double * tzd2, double tcx, double tcy, int size_x, int size_y, int size_z,
    int start_x, int end_x, int start_y, int end_y) {
  advect_u_flow_field_c(su, u, v, w, tzc1, tzc2, tcx, tcy, size_x, size_y,
      size_z, start_x, end_x, start_y, end_y);
  advect_v_flow_field_c(sv, u, v, w, tzc1, tzc2, tcx, tcy, size_x, size_y,
      size_z, start_x, end_x, start_y, end_y);
  advect_w_flow_field_c(sw, u, v, w, tzd1, tzd2, tcx, tcy, size_x, size_y,
      size_z, start_x, end_x, start_y, end_y);
}

/* Advect the u velocity component; parallelised over the outer x loop. */
void advect_u_flow_field_c(double * su, double * u, double * v, double * w,
    double * tzc1, double * tzc2, double tcx, double tcy, int size_x,
    int size_y, int size_z, int start_x, int end_x, int start_y, int end_y) {
  int i, j, k, counter_loc, counter_loc_xp1, counter_loc_xm1, counter_loc_yp1,
      counter_loc_ym1, counter_loc_ym1_xp1;
  /* BUG FIX: counter_loc_ym1_xp1 was missing from the private clause, so all
     threads raced on one shared copy written inside the loop. It is now
     private like the other per-iteration counters. */
  #pragma omp parallel for private(i, counter_loc, counter_loc_xp1, counter_loc_xm1, counter_loc_yp1, counter_loc_ym1, counter_loc_ym1_xp1, j, k)
  for (i=start_x;i<end_x;i++) {
    counter_loc=(i * size_y * size_z) + (size_z * start_y);
    counter_loc_xp1=((i+1) * size_y * size_z) + (size_z * start_y);
    counter_loc_xm1=((i-1) * size_y * size_z) + (size_z * start_y);
    counter_loc_yp1=(i * size_y * size_z) + (size_z * (start_y+1));
    counter_loc_ym1=(i * size_y * size_z) + (size_z * (start_y-1));
    /* NOTE(review): this counter is set once per i and never advanced in the
       inner loops, unlike the others — faithful to the original; verify. */
    counter_loc_ym1_xp1=((i+1) * size_y * size_z) + (size_z * (start_y-1));
    for (j=start_y;j<end_y;j++) {
      for (k=1;k<size_z;k++) {
        counter_loc++;
        counter_loc_xp1++;
        counter_loc_xm1++;
        counter_loc_yp1++;
        counter_loc_ym1++;
        su[counter_loc]=tcx*(u[counter_loc_xm1] * (u[counter_loc] + u[counter_loc_xm1]) - u[counter_loc_xp1] * (u[counter_loc] + u[counter_loc_xp1]));
        su[counter_loc]=su[counter_loc]+tcy*(u[counter_loc_ym1] * (v[counter_loc_ym1] + v[counter_loc_ym1_xp1]) - u[counter_loc_yp1] * (v[counter_loc] + v[counter_loc_xp1]));
        if (k < size_z - 1) {
          su[counter_loc]=su[counter_loc]+(tzc1[k] * u[counter_loc-1] * (w[counter_loc-1] + w[counter_loc_xp1-1]) - tzc2[k] * u[counter_loc+1] * (w[counter_loc] + w[counter_loc_xp1]));
        } else { // Lid
          su[counter_loc]=su[counter_loc]+tzc1[k] * u[counter_loc-1] * (w[counter_loc-1] + w[counter_loc_xp1-1]);
        }
      }
    }
  }
}

/* Advect the v velocity component; parallelised over the outer x loop. */
void advect_v_flow_field_c(double * sv, double * u, double * v, double * w,
    double * tzc1, double * tzc2, double tcx, double tcy, int size_x,
    int size_y, int size_z, int start_x, int end_x, int start_y, int end_y) {
  int i, j, k, counter_loc, counter_loc_xp1, counter_loc_xm1, counter_loc_yp1, counter_loc_ym1;
  #pragma omp parallel for private(i, counter_loc, counter_loc_xp1, counter_loc_xm1, counter_loc_yp1, counter_loc_ym1, j, k)
  for (i=start_x;i<end_x;i++) {
    counter_loc=(i * size_y * size_z) + (size_z * start_y);
    counter_loc_xp1=((i+1) * size_y * size_z) + (size_z * start_y);
    counter_loc_xm1=((i-1) * size_y * size_z) + (size_z * start_y);
    counter_loc_yp1=(i * size_y * size_z) + (size_z * (start_y+1));
    counter_loc_ym1=(i * size_y * size_z) + (size_z * (start_y-1));
    for (j=start_y;j<end_y;j++) {
      for (k=1;k<size_z;k++) {
        counter_loc++;
        counter_loc_xp1++;
        counter_loc_xm1++;
        counter_loc_yp1++;
        counter_loc_ym1++;
        sv[counter_loc]=tcy * (v[counter_loc_ym1] * (v[counter_loc] + v[counter_loc_ym1]) - v[counter_loc_yp1] * (v[counter_loc] + v[counter_loc_yp1]));
        sv[counter_loc]=sv[counter_loc]+tcx*(v[counter_loc_xm1] * (u[counter_loc_xm1] + u[counter_loc_yp1]) - v[counter_loc_xp1] * (u[counter_loc] + u[counter_loc_yp1]));
        if (k < size_z - 1) {
          sv[counter_loc]=sv[counter_loc]+(tzc1[k] * v[counter_loc-1] * (w[counter_loc-1] + w[counter_loc_yp1-1]) - tzc2[k] * v[counter_loc+1] * (w[counter_loc] + w[counter_loc_yp1]));
        } else { // Lid
          sv[counter_loc]=sv[counter_loc]+tzc1[k] * v[counter_loc-1] * (w[counter_loc-1] + w[counter_loc_yp1-1]);
        }
      }
    }
  }
}

/* Advect the w velocity component; k stops at size_z-1 (no lid term). */
void advect_w_flow_field_c(double * sw, double * u, double * v, double * w,
    double * tzd1, double * tzd2, double tcx, double tcy, int size_x,
    int size_y, int size_z, int start_x, int end_x, int start_y, int end_y) {
  int i, j, k, counter_loc, counter_loc_xp1, counter_loc_xm1, counter_loc_yp1, counter_loc_ym1;
  #pragma omp parallel for private(i, counter_loc, counter_loc_xp1, counter_loc_xm1, counter_loc_yp1, counter_loc_ym1, j, k)
  for (i=start_x;i<end_x;i++) {
    counter_loc=(i * size_y * size_z) + (size_z * start_y);
    counter_loc_xp1=((i+1) * size_y * size_z) + (size_z * start_y);
    counter_loc_xm1=((i-1) * size_y * size_z) + (size_z * start_y);
    counter_loc_yp1=(i * size_y * size_z) + (size_z * (start_y+1));
    counter_loc_ym1=(i * size_y * size_z) + (size_z * (start_y-1));
    for (j=start_y;j<end_y;j++) {
      for (k=1;k<size_z-1;k++) {
        counter_loc++;
        counter_loc_xp1++;
        counter_loc_xm1++;
        counter_loc_yp1++;
        counter_loc_ym1++;
        sw[counter_loc]=tzd1[k] * w[counter_loc-1] * (w[counter_loc] + w[counter_loc-1]) - tzd2[k] * w[counter_loc+1] * (w[counter_loc] + w[counter_loc+1]);
        sw[counter_loc]=sw[counter_loc]+tcx*(w[counter_loc_xm1]*(u[counter_loc] + u[counter_loc_xm1+1]) - w[counter_loc_xp1] * (u[counter_loc] + u[counter_loc+1]));
        sw[counter_loc]=sw[counter_loc]+tcy*(w[counter_loc_ym1] * (v[counter_loc_ym1] + v[counter_loc_ym1+1]) - w[counter_loc_yp1] * (v[counter_loc] + v[counter_loc+1]));
      }
    }
  }
}

/* Wall-clock time in microseconds since the Unix epoch. */
static long getEpoch() {
  struct timeval tm;
  gettimeofday(&tm, NULL);
  return (tm.tv_sec * 1000000)+tm.tv_usec;
}

/* Elapsed time between two getEpoch() stamps, in milliseconds. */
static double getTiming(long end_time, long start_time) {
  return (end_time - start_time) / 1.0e3 ;
}

/*
 * Total floating-point operations across all kernels and iterations.
 * BUG FIX: the products were evaluated in (32-bit) int arithmetic before
 * being widened, which overflows for domains larger than ~2^31 elements;
 * the first factor is now cast to long long so the whole product is wide.
 */
static long long getTotalFLOPS(int x_size, int y_size, int z_size, int iterations) {
  long long total_elements_xu=(long long) x_size * y_size * (z_size-1);
  long long lid_elements=(long long) x_size * y_size;
  long long non_lid_elements=total_elements_xu-lid_elements;
  long long total_elements_w=(long long) x_size * y_size * (z_size-2);
  long long advectxu_flops=(lid_elements * 17) + (non_lid_elements * 21);
  long long advectw_flops=total_elements_w * 21;
  /* u and v kernels have identical FLOP counts, hence the factor of 2. */
  return ((advectxu_flops * 2) + advectw_flops) * iterations;
}
master.c
/* $ gcc -fopenmp -O2 src/master.c -o bin/master master lo ejecuta solo la hebra 0 diferencias entre single y master: - single se parece a master, pero en single es la primera que llegue y la master la ejecuta la hebra 0 - master no tiene barrera, single si $ export OMP_NUM_THREADS=3 $ ./bin/master 6 thread 0 suma de a[0]=0 sumalocal=0 thread 0 suma de a[1]=1 sumalocal=1 thread 1 suma de a[2]=2 sumalocal=2 thread 1 suma de a[3]=3 sumalocal=5 thread 2 suma de a[4]=4 sumalocal=4 thread 2 suma de a[5]=5 sumalocal=9 thread master=0 imprime suma=15 */ #include <stdio.h> #include <stdlib.h> #include <omp.h> int main(int argc, char **argv) { int i, n=20, tid, a[n],suma=0,sumalocal; if(argc < 2) { fprintf(stderr,"\nFalta iteraciones\n"); exit(-1); } n = atoi(argv[1]); if (n>20) n=20; for (i=0; i<n; i++) a[i] = i; #pragma omp parallel private(sumalocal,tid) { sumalocal=0; tid=omp_get_thread_num(); #pragma omp for schedule(static) for (i=0; i<n; i++) { sumalocal += a[i]; printf(" thread %d suma de a[%d]=%d sumalocal=%d\n", tid,i,a[i],sumalocal); } #pragma omp atomic suma += sumalocal; #pragma omp barrier #pragma omp master printf("thread master=%d imprime suma=%d\n",tid,suma); } }
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 3 of the License, or (at your option) any later version. // // Alternatively, you can redistribute it and/or // modify it under the terms of the GNU General Public License as // published by the Free Software Foundation; either version 2 of // the License, or (at your option) any later version. // // Eigen is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the // GNU General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License and a copy of the GNU General Public License along with // Eigen. If not, see <http://www.gnu.org/licenses/>. 
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

/** \internal
  * Combined getter/setter for Eigen's thread-count setting, so the state
  * lives in a single function-local static. action selects the operation;
  * v is the value to store (SetAction) or the out-parameter (GetAction).
  * NOTE(review): the static is not synchronized — concurrent Set/Get from
  * different threads would race; presumably callers configure it up front. */
inline void ei_manage_multi_threading(Action action, int* v)
{
  static int m_maxThreads = -1;

  if(action==SetAction)
  {
    ei_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    ei_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    /* -1 (unset) means "defer to OpenMP's default" */
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    ei_internal_assert(false);
  }
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  ei_manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  ei_manage_multi_threading(SetAction, &v);
}

/** \internal Per-thread bookkeeping for a parallel GEMM.
  * sync/users are volatile because worker threads spin on them to
  * coordinate access to each other's RHS panels (see the GEMM kernels);
  * rhs_start/rhs_length describe the column panel this thread packs. */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {}

  int volatile sync;
  int volatile users;

  Index rhs_start;
  Index rhs_length;
};

/** \internal Run func over the rows*cols product, split across OpenMP
  * threads when it is worthwhile, otherwise in the calling thread.
  * Condition is a compile-time gate supplied by the caller. */
template<bool Condition, typename Functor, typename Index>
void ei_parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
{
#ifndef EIGEN_HAS_OPENMP
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redisigned anyway.
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // 1- are we already in a parallel session?
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Index size = transpose ? cols : rows;

  // 2- compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index max_threads = std::max<Index>(1,size / 32);

  // 3 - compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), max_threads);

  if(threads==1)
    return func(0,rows, 0,cols);

  func.initParallelSession();

  if(transpose)
    std::swap(rows,cols);

  // Panel sizes are rounded down to multiples of 4 (cols) / 8 (rows);
  // the last thread picks up the remainder below.
  Index blockCols = (cols / threads) & ~Index(0x3);
  Index blockRows = (rows / threads) & ~Index(0x7);

  // NOTE(review): raw new[]/delete[] — leaks if func throws; acceptable
  // here only if the functors are nothrow. Verify before reuse.
  GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];

  #pragma omp parallel for schedule(static,1) num_threads(threads)
  for(Index i=0; i<threads; ++i)
  {
    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;

    info[i].rhs_start = c0;
    info[i].rhs_length = actualBlockCols;

    if(transpose)
      func(0, cols, r0, actualBlockRows, info);
    else
      func(r0, actualBlockRows, 0,cols, info);
  }

  delete[] info;
#endif
}

#endif // EIGEN_PARALLELIZER_H
1.c
#include <stdio.h> int main() { #pragma omp parallel { printf(" Hello "); } printf("\n\n GoodBye – Team Destroyed – Exiting Program \n\n"); }
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/shear.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" /* Numerous internal routines for image distortions. 
*/

static inline void AffineArgsToCoefficients(double *affine)
{
  /* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 (in place) */
  double tmp[4];
  /* note indexes 0 and 5 remain unchanged */
  tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4];
  affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3];
}

static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty (in place;
     exact inverse of AffineArgsToCoefficients) */
  double tmp[4];
  /* note indexes 0 and 5 remain unchanged */
  tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2];
  coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3];
}

static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 50.
     PerceptibleReciprocal() guards against a (near-)singular matrix by
     returning a clamped reciprocal instead of dividing by zero. */
  double determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=determinant*coeff[4];
  inverse[1]=determinant*(-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[3]=determinant*(-coeff[3]);
  inverse[4]=determinant*coeff[0];
  inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}

static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 53 */
  double determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}

/*
 * Polynomial Term Defining Functions
 *
 * Order must either be an integer, or 1.5 to produce
 * the 2-dimensional polynomial function...
 *    affine     1   (3)      u = c0 + c1*x + c2*y
 *    bilinear   1.5 (4)      u = '' + c3*x*y
 *    quadratic  2   (6)      u = '' + c4*x*x + c5*y*y
 *    cubic      3   (10)     u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
 *    quartic    4   (15)     u = '' + c10*x^4 + ... + c14*y^4
 *    quintic    5   (21)     u = '' + c15*x^5 + ... + c20*y^5
 * number in parenthesis minimum number of points needed.
 * Anything beyond quintic, has not been implemented until
 * a more automated way of determining terms is found.
 *
 * Note the slight re-ordering of the terms for a quadratic polynomial
 * which is to allow the use of a bi-linear (order=1.5) polynomial.
 * All the later polynomials are ordered simply from x^N to y^N
 */
static size_t poly_number_terms(double order)
{
 /* Return the number of terms for a 2d polynomial, or 0 if the order is
    not supported.
    NOTE(review): the second clause uses (order-1.5) > MagickEpsilon rather
    than fabs(order-1.5) > MagickEpsilon, so non-integer orders BELOW 1.5
    (e.g. 1.2) are not rejected — presumably unintended; confirm against
    upstream before changing. */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && (order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}

static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Return the result for this polynomial term */
  switch(n) {
    case  0:  return( 1.0 ); /* constant */

    case  1:  return( x );
    case  2:  return( y );        /* affine          order = 1   terms = 3 */
    case  3:  return( x*y );      /* bilinear        order = 1.5 terms = 4 */
    case  4:  return( x*x );
    case  5:  return( y*y );      /* quadratic       order = 2   terms = 6 */
    case  6:  return( x*x*x );
    case  7:  return( x*x*y );
    case  8:  return( x*y*y );
    case  9:  return( y*y*y );    /* cubic           order = 3   terms = 10 */
    case 10:  return( x*x*x*x );
    case 11:  return( x*x*x*y );
    case 12:  return( x*x*y*y );
    case 13:  return( x*y*y*y );
    case 14:  return( y*y*y*y );  /* quartic         order = 4   terms = 15 */
    case 15:  return( x*x*x*x*x );
    case 16:  return( x*x*x*x*y );
    case 17:  return( x*x*x*y*y );
    case 18:  return( x*x*y*y*y );
    case 19:  return( x*y*y*y*y );
    case 20:  return( y*y*y*y*y ); /* quintic        order = 5   terms = 21 */
  }
  return( 0 ); /* should never happen */
}

static const char *poly_basis_str(ssize_t n)
{
  /* return the symbolic expression (in ii,jj) for this polynomial term,
     used when generating -fx style expressions */
  switch(n) {
    case  0:  return(""); /* constant */

    case  1:  return("*ii");
    case  2:  return("*jj");           /* affine    order = 1   terms = 3 */
    case  3:  return("*ii*jj");        /* bilinear  order = 1.5 terms = 4 */
    case  4:  return("*ii*ii");
    case  5:  return("*jj*jj");        /* quadratic order = 2   terms = 6 */
    case  6:  return("*ii*ii*ii");
    case  7:  return("*ii*ii*jj");
    case  8:  return("*ii*jj*jj");
    case  9:  return("*jj*jj*jj");     /* cubic     order = 3   terms = 10 */
    case 10:  return("*ii*ii*ii*ii");
    case 11:  return("*ii*ii*ii*jj");
    case 12:  return("*ii*ii*jj*jj");
    case 13:  return("*ii*jj*jj*jj");
    case 14:  return("*jj*jj*jj*jj");  /* quartic   order = 4   terms = 15 */
    case 15:  return("*ii*ii*ii*ii*ii");
    case 16:  return("*ii*ii*ii*ii*jj");
    case 17:  return("*ii*ii*ii*jj*jj");
    case 18:  return("*ii*ii*jj*jj*jj");
    case 19:  return("*ii*jj*jj*jj*jj");
    case 20:  return("*jj*jj*jj*jj*jj"); /* quintic order = 5   terms = 21 */
  }
  return( "UNKNOWN" ); /* should never happen */
}

static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* polynomial term for x derivative
     NOTE(review): constant multipliers of the true derivatives are omitted
     (e.g. d(x*x)/dx is returned as x, not 2x) — presumably only the scale
     of the derivative field matters to the caller; verify at call sites. */
  switch(n) {
    case  0:  return( 0.0 ); /* constant */

    case  1:  return( 1.0 );
    case  2:  return( 0.0 );      /* affine      order = 1   terms = 3 */
    case  3:  return( y );        /* bilinear    order = 1.5 terms = 4 */
    case  4:  return( x );
    case  5:  return( 0.0 );      /* quadratic   order = 2   terms = 6 */
    case  6:  return( x*x );
    case  7:  return( x*y );
    case  8:  return( y*y );
    case  9:  return( 0.0 );      /* cubic       order = 3   terms = 10 */
    case 10:  return( x*x*x );
    case 11:  return( x*x*y );
    case 12:  return( x*y*y );
    case 13:  return( y*y*y );
    case 14:  return( 0.0 );      /* quartic     order = 4   terms = 15 */
    case 15:  return( x*x*x*x );
    case 16:  return( x*x*x*y );
    case 17:  return( x*x*y*y );
    case 18:  return( x*y*y*y );
    case 19:  return( y*y*y*y );
    case 20:  return( 0.0 );      /* quintic     order = 5   terms = 21 */
  }
  return( 0.0 ); /* should never happen */
}

static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* polynomial term for y derivative */
  switch(n) {
    case  0:  return( 0.0 ); /* constant */

    case  1:  return( 0.0 );
    case  2:  return( 1.0 );  /* affine      order = 1   terms = 3 */
    case  3:  return( x );    /* bilinear    order = 1.5 terms = 4 */
    case  4:  return( 0.0 );
    case  5:  return( y );    /* quadratic   order = 2   terms = 6 */
    default:  return( poly_basis_dx(n-1,x,y) ); /* weird but true */
  }
  /* NOTE: the only reason that last is not true for 'quadratic'
     is due to the re-arrangement of terms to allow for a 'bilinear' */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A f f i n e T r a n s f o r m I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AffineTransformImage() transforms an image as dictated by the affine
%  matrix.  It allocates the memory necessary for the new Image structure and
%  returns a pointer to the new image.
%
%  The format of the AffineTransformImage method is:
%
%      Image *AffineTransformImage(const Image *image,
%        AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o affine_matrix: the affine matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image: thin wrapper that repacks the AffineMatrix into
    the 6-argument form expected by DistortImage().
  */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e n e r a t e C o e f f i c i e n t s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GenerateCoefficients() takes user provided input arguments and generates
%  the coefficients, needed to apply the specific distortion for either
%  distorting images (generally using control points) or generating a color
%  gradient from sparsely separated color points.
%
%  The format of the GenerateCoefficients() method is:
%
%    Image *GenerateCoefficients(const Image *image,DistortMethod method,
%        const size_t number_arguments,const double *arguments,
%        size_t number_values, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion/ sparse gradient
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: the arguments for this distortion method.
% % o number_values: the style and format of given control points, (caller type) % 0: 2 dimensional mapping of control points (Distort) % Format: u,v,x,y where u,v is the 'source' of the % the color to be plotted, for DistortImage() % N: Interpolation of control points with N values (usally r,g,b) % Format: x,y,r,g,b mapping x,y to color values r,g,b % IN future, variable number of values may be given (1 to N) % % o exception: return any errors or warnings in this structure % % Note that the returned array of double values must be freed by the % calling method using RelinquishMagickMemory(). This however may change in % the future to require a more 'method' specific method. % % Because of this this method should not be classed as stable or used % outside other MagickCore library methods. */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static double *GenerateCoefficients(const Image *image, DistortMethod *method,const size_t number_arguments,const double *arguments, size_t number_values,ExceptionInfo *exception) { double *coeff; register size_t i; size_t number_coeff, /* number of coefficients to return (array size) */ cp_size, /* number floating point numbers per control point */ cp_x,cp_y, /* the x,y indexes for control point */ cp_values; /* index of values for this control point */ /* number_values Number of values given per control point */ if ( number_values == 0 ) { /* Image distortion using control points (or other distortion) That is generate a mapping so that x,y->u,v given u,v,x,y */ number_values = 2; /* special case: two values of u,v */ cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */ cp_x = 2; /* location of x,y in input control values */ cp_y = 3; /* NOTE: cp_values, also used for later 'reverse map distort' tests */ } else { cp_x = 0; /* location of x,y in input control values */ cp_y = 1; cp_values = 2; /* and the 
other values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP defintion involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficents depend on the given polynomal 'order' */ i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be interger 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadraterialDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: number_coeff=5; break; case ScaleRotateTranslateDistortion: case AffineProjectionDistortion: case Plane2CylinderDistortion: case Cylinder2PlaneDistortion: number_coeff=6; break; case PolarDistortion: case DePolarDistortion: number_coeff=8; break; case 
PerspectiveDistortion: case PerspectiveProjectionDistortion: number_coeff=9; break; case BarrelDistortion: case BarrelInverseDistortion: number_coeff=10; break; default: perror("unknown method given"); /* just fail assertion */ } /* allocate the array of coefficients needed */ coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff)); if (coeff == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "GenerateCoefficients"); return((double *) NULL); } /* zero out coefficients array */ for (i=0; i < number_coeff; i++) coeff[i] = 0.0; switch (*method) { case AffineDistortion: { /* Affine Distortion v = c0*x + c1*y + c2 for each 'value' given Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Affine", 1.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* handle special cases of not enough arguments */ if ( number_arguments == cp_size ) { /* Only 1 CP Set Given */ if ( cp_values == 0 ) { /* image distortion - translate the image */ coeff[0] = 1.0; coeff[2] = arguments[0] - arguments[2]; coeff[4] = 1.0; coeff[5] = arguments[1] - arguments[3]; } else { /* sparse gradient - use the values directly */ for (i=0; i<number_values; i++) coeff[i*3+2] = arguments[cp_values+i]; } } else { /* 2 or more points (usally 3) given. Solve a least squares simultaneous equation for coefficients. 
*/ double **matrix, **vectors, terms[3]; MagickBooleanType status; /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(3UL,3UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*3]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),3UL,number_values); } if ( number_arguments == 2*cp_size ) { /* Only two pairs were given, but we need 3 to solve the affine. Fake extra coordinates by rotating p1 around p0 by 90 degrees. 
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0) */ terms[0] = arguments[cp_x] - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */ terms[1] = arguments[cp_y] + + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */ terms[2] = 1; /* 1 */ if ( cp_values == 0 ) { /* Image Distortion - rotate the u,v coordients too */ double uv2[2]; uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */ uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */ LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL); } else { /* Sparse Gradient - use values of p0 for linear gradient */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[cp_values]),3UL,number_values); } } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,3UL,number_values); matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } } return(coeff); } case AffineProjectionDistortion: { /* Arguments: Affine Matrix (forward mapping) Arguments sx, rx, ry, sy, tx, ty Where u = sx*x + ry*y + tx v = rx*x + sy*y + ty Returns coefficients (in there inverse form) ordered as... sx ry tx rx sy ty AffineProjection Distortion Notes... 
+ Will only work with a 2 number_values for Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ double inverse[8]; if (number_arguments != 6) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs 6 coeff values'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */ for(i=0; i<6UL; i++ ) inverse[i] = arguments[i]; AffineArgsToCoefficients(inverse); /* map into coefficents */ InvertAffineCoefficients(inverse, coeff); /* invert */ *method = AffineDistortion; return(coeff); } case ScaleRotateTranslateDistortion: { /* Scale, Rotate and Translate Distortion An alternative Affine Distortion Argument options, by number of arguments given: 7: x,y, sx,sy, a, nx,ny 6: x,y, s, a, nx,ny 5: x,y, sx,sy, a 4: x,y, s, a 3: x,y, a 2: s, a 1: a Where actions are (in order of application) x,y 'center' of transforms (default = image center) sx,sy scale image by this amount (default = 1) a angle of rotation (argument required) nx,ny move 'center' here (default = x,y or no movement) And convert to affine mapping coefficients ScaleRotateTranslate Distortion Notes... 
+ Does not use a set of CPs in any normal way + Will only work with a 2 number_valuesal Image Distortion + Cannot be used for generating a sparse gradient (interpolation) */ double cosine, sine, x,y,sx,sy,a,nx,ny; /* set default center, and default scale */ x = nx = (double)(image->columns)/2.0 + (double)image->page.x; y = ny = (double)(image->rows)/2.0 + (double)image->page.y; sx = sy = 1.0; switch ( number_arguments ) { case 0: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs at least 1 argument'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); case 1: a = arguments[0]; break; case 2: sx = sy = arguments[0]; a = arguments[1]; break; default: x = nx = arguments[0]; y = ny = arguments[1]; switch ( number_arguments ) { case 3: a = arguments[2]; break; case 4: sx = sy = arguments[2]; a = arguments[3]; break; case 5: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; break; case 6: sx = sy = arguments[2]; a = arguments[3]; nx = arguments[4]; ny = arguments[5]; break; case 7: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; nx = arguments[5]; ny = arguments[6]; break; default: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Too Many Arguments (7 or less)'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } break; } /* Trap if sx or sy == 0 -- image is scaled out of existance! 
*/ if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Zero Scale Given'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Save the given arguments as an affine distortion */ a=DegreesToRadians(a); cosine=cos(a); sine=sin(a); *method = AffineDistortion; coeff[0]=cosine/sx; coeff[1]=sine/sx; coeff[2]=x-nx*coeff[0]-ny*coeff[1]; coeff[3]=(-sine)/sy; coeff[4]=cosine/sy; coeff[5]=y-nx*coeff[3]-ny*coeff[4]; return(coeff); } case PerspectiveDistortion: { /* Perspective Distortion (a ratio of affine distortions) p(x,y) c0*x + c1*y + c2 u = ------ = ------------------ r(x,y) c6*x + c7*y + 1 q(x,y) c3*x + c4*y + c5 v = ------ = ------------------ r(x,y) c6*x + c7*y + 1 c8 = Sign of 'r', or the denominator affine, for the actual image. This determines what part of the distorted image is 'ground' side of the horizon, the other part is 'sky' or invalid. Valid values are +1.0 or -1.0 only. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... Perspective Distortion Notes... + Can be thought of as ratio of 3 affine transformations + Not separatable: r() or c6 and c7 are used by both equations + All 8 coefficients must be determined simultaniously + Will only work with a 2 number_valuesal Image Distortion + Can not be used for generating a sparse gradient (interpolation) + It is not linear, but is simple to generate an inverse + All lines within an image remain lines. + but distances between points may vary. 
*/ double **matrix, *vectors[1], terms[8]; size_t cp_u = cp_values, cp_v = cp_values+1; MagickBooleanType status; if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* fake 1x8 vectors matrix directly using the coefficients array */ vectors[0] = &(coeff[0]); /* 8x8 least-squares matrix (zeroed) */ matrix = AcquireMagickMatrix(8UL,8UL); if (matrix == (double **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* Add control points for least squares solving */ for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+cp_x]; /* c0*x */ terms[1]=arguments[i+cp_y]; /* c1*y */ terms[2]=1.0; /* c2*1 */ terms[3]=0.0; terms[4]=0.0; terms[5]=0.0; terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */ terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]), 8UL,1UL); terms[0]=0.0; terms[1]=0.0; terms[2]=0.0; terms[3]=arguments[i+cp_x]; /* c3*x */ terms[4]=arguments[i+cp_y]; /* c4*y */ terms[5]=1.0; /* c5*1 */ terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */ terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]), 8UL,1UL); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,8UL,1UL); matrix = RelinquishMagickMatrix(matrix, 8UL); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Calculate 9'th 
coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image coordinate (first control point) in destination for determination of what part of view is 'ground'. */ coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; return(coeff); } case PerspectiveProjectionDistortion: { /* Arguments: Perspective Coefficents (forward mapping) */ if (number_arguments != 8) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'Needs 8 coefficient values'", CommandOptionToMnemonic(MagickDistortOptions, *method)); return((double *) NULL); } /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */ InvertPerspectiveCoefficients(arguments, coeff); /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image cocodinate in destination for determination. For a forward mapped perspective the images 0,0 coord will map to c2,c5 in the distorted image, so set the sign of denominator of that. */ coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; *method = PerspectiveDistortion; return(coeff); } case BilinearForwardDistortion: case BilinearReverseDistortion: { /* Bilinear Distortion (Forward mapping) v = c0*x + c1*y + c2*x*y + c3; for each 'value' given This is actually a simple polynomial Distortion! The difference however is when we need to reverse the above equation to generate a BilinearForwardDistortion (see below). Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... 
*/ double **matrix, **vectors, terms[4]; MagickBooleanType status; /* check the number of arguments */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(4UL,4UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x4 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*4]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = terms[0]*terms[1]; /* x*y */ terms[3] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),4UL,number_values); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,4UL,number_values); matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( *method == BilinearForwardDistortion ) { /* Bilinear Forward 
Mapped Distortion The above least-squares solved for coefficents but in the forward direction, due to changes to indexing constants. i = c0*x + c1*y + c2*x*y + c3; j = c4*x + c5*y + c6*x*y + c7; where i,j are in the destination image, NOT the source. Reverse Pixel mapping however needs to use reverse of these functions. It required a full page of algbra to work out the reversed mapping formula, but resolves down to the following... c8 = c0*c5-c1*c4; c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula i = i - c3; j = j - c7; b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0 c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a) r = b*b - c9*(c+c); if ( c9 != 0 ) y = ( -b + sqrt(r) ) / c9; else y = -c/b; x = ( i - c1*y) / ( c1 - c2*y ); NB: if 'r' is negative there is no solution! NB: the sign of the sqrt() should be negative if image becomes flipped or flopped, or crosses over itself. NB: techniqually coefficient c5 is not needed, anymore, but kept for completness. See Anthony Thyssen <A.Thyssen@griffith.edu.au> or Fred Weinhaus <fmw@alink.net> for more details. */ coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4]; coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]); } return(coeff); } #if 0 case QuadrilateralDistortion: { /* Map a Quadrilateral to a unit square using BilinearReverse Then map that unit square back to the final Quadrilateral using BilinearForward. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ /* UNDER CONSTRUCTION */ return(coeff); } #endif case PolynomialDistortion: { /* Polynomial Distortion First two coefficents are used to hole global polynomal information c0 = Order of the polynimial being created c1 = number_of_terms in one polynomial equation Rest of the coefficients map to the equations.... v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ... for each 'value' (number_values of them) given. 
As such total coefficients = 2 + number_terms * number_values Input Arguments are sets of control points... For Distort Images order [u,v, x,y] ... For Sparse Gradients order [x,y, r,g,b] ... Polynomial Distortion Notes... + UNDER DEVELOPMENT -- Do not expect this to remain as is. + Currently polynomial is a reversed mapped distortion. + Order 1.5 is fudged to map into a bilinear distortion. though it is not the same order as that distortion. */ double **matrix, **vectors, *terms; size_t nterms; /* number of polynomial terms per number_values */ register ssize_t j; MagickBooleanType status; /* first two coefficients hold polynomial order information */ coeff[0] = arguments[0]; coeff[1] = (double) poly_number_terms(arguments[0]); nterms = (size_t) coeff[1]; /* create matrix, a fake vectors matrix, and least sqs terms */ matrix = AcquireMagickMatrix(nterms,nterms); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms)); if (matrix == (double **) NULL || vectors == (double **) NULL || terms == (double *) NULL ) { matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); terms = (double *) RelinquishMagickMemory(terms); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[2+i*nterms]); /* Add given control point pairs for least squares solving */ for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */ for (j=0; j < (ssize_t) nterms; j++) terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]); LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),nterms,number_values); } terms = (double *) RelinquishMagickMemory(terms); /* 
Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,nterms,number_values); matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } return(coeff); } case ArcDistortion: { /* Arc Distortion Args: arc_width rotate top_edge_radius bottom_edge_radius All but first argument are optional arc_width The angle over which to arc the image side-to-side rotate Angle to rotate image from vertical center top_radius Set top edge of source image at this radius bottom_radius Set bootom edge to this radius (radial scaling) By default, if the radii arguments are nor provided the image radius is calculated so the horizontal center-line is fits the given arc without scaling. The output image size is ALWAYS adjusted to contain the whole image, and an offset is given to position image relative to the 0,0 point of the origin, allowing users to use relative positioning onto larger background (via -flatten). The arguments are converted to these coefficients c0: angle for center of source image c1: angle scale for mapping to source image c2: radius for top of source image c3: radius scale for mapping source image c4: centerline of arc within source image Note the coefficients use a center angle, so asymptotic join is furthest from both sides of the source image. This also means that for arc angles greater than 360 the sides of the image will be trimmed equally. Arc Distortion Notes... 
+ Does not use a set of CPs + Will only work with Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Arc Angle Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Outer Radius Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } coeff[0] = -MagickPI2; /* -90, place at top! */ if ( number_arguments >= 1 ) coeff[1] = DegreesToRadians(arguments[0]); else coeff[1] = MagickPI2; /* zero arguments - center is at top */ if ( number_arguments >= 2 ) coeff[0] += DegreesToRadians(arguments[1]); coeff[0] /= Magick2PI; /* normalize radians */ coeff[0] -= MagickRound(coeff[0]); coeff[0] *= Magick2PI; /* de-normalize back to radians */ coeff[3] = (double)image->rows-1; coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0; if ( number_arguments >= 3 ) { if ( number_arguments >= 4 ) coeff[3] = arguments[2] - arguments[3]; else coeff[3] *= arguments[2]/coeff[2]; coeff[2] = arguments[2]; } coeff[4] = ((double)image->columns-1.0)/2.0; return(coeff); } case PolarDistortion: case DePolarDistortion: { /* (De)Polar Distortion (same set of arguments) Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato DePolar can also have the extra arguments of Width, Height Coefficients 0 to 5 is the sanatized version first 6 input args Coefficient 6 is the angle to coord ratio and visa-versa Coefficient 7 is the radius to coord ratio and visa-versa WARNING: It is possible for Radius max<min and/or Angle from>to */ if ( number_arguments == 3 || ( number_arguments > 6 && 
*method == PolarDistortion ) || number_arguments > 8 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rmax - if 0 calculate appropriate value */ if ( number_arguments >= 1 ) coeff[0] = arguments[0]; else coeff[0] = 0.0; /* Rmin - usally 0 */ coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0; /* Center X,Y */ if ( number_arguments >= 4 ) { coeff[2] = arguments[2]; coeff[3] = arguments[3]; } else { /* center of actual image */ coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; } /* Angle from,to - about polar center 0 is downward */ coeff[4] = -MagickPI; if ( number_arguments >= 5 ) coeff[4] = DegreesToRadians(arguments[4]); coeff[5] = coeff[4]; if ( number_arguments >= 6 ) coeff[5] = DegreesToRadians(arguments[5]); if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon ) coeff[5] += Magick2PI; /* same angle is a full circle */ /* if radius 0 or negative, its a special value... 
*/ if ( coeff[0] < MagickEpsilon ) { /* Use closest edge if radius == 0 */ if ( fabs(coeff[0]) < MagickEpsilon ) { coeff[0]=MagickMin(fabs(coeff[2]-image->page.x), fabs(coeff[3]-image->page.y)); coeff[0]=MagickMin(coeff[0], fabs(coeff[2]-image->page.x-image->columns)); coeff[0]=MagickMin(coeff[0], fabs(coeff[3]-image->page.y-image->rows)); } /* furthest diagonal if radius == -1 */ if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) { double rx,ry; rx = coeff[2]-image->page.x; ry = coeff[3]-image->page.y; coeff[0] = rx*rx+ry*ry; ry = coeff[3]-image->page.y-image->rows; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); rx = coeff[2]-image->page.x-image->columns; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); ry = coeff[3]-image->page.y; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); coeff[0] = sqrt(coeff[0]); } } /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */ if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon || (coeff[0]-coeff[1]) < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid Radius", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* converstion ratios */ if ( *method == PolarDistortion ) { coeff[6]=(double) image->columns/(coeff[5]-coeff[4]); coeff[7]=(double) image->rows/(coeff[0]-coeff[1]); } else { /* *method == DePolarDistortion */ coeff[6]=(coeff[5]-coeff[4])/image->columns; coeff[7]=(coeff[0]-coeff[1])/image->rows; } return(coeff); } case Cylinder2PlaneDistortion: case Plane2CylinderDistortion: { /* 3D Cylinder to/from a Tangential Plane Projection between a clinder and flat plain from a point on the center line of the cylinder. The two surfaces coincide in 3D space at the given centers of distortion (perpendicular to projection point) on both images. 
Args: FOV_arc_width Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y FOV (Field Of View) the angular field of view of the distortion, across the width of the image, in degrees. The centers are the points of least distortion in the input and resulting images. These centers are however determined later. Coeff 0 is the FOV angle of view of image width in radians Coeff 1 is calculated radius of cylinder. Coeff 2,3 center of distortion of input image Coefficents 4,5 Center of Distortion of dest (determined later) */ if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid FOV Angle", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } coeff[0] = DegreesToRadians(arguments[0]); if ( *method == Cylinder2PlaneDistortion ) /* image is curved around cylinder, so FOV angle (in radians) * scales directly to image X coordinate, according to its radius. */ coeff[1] = (double) image->columns/coeff[0]; else /* radius is distance away from an image with this angular FOV */ coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) ); coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; coeff[4] = coeff[2]; coeff[5] = coeff[3]; /* assuming image size is the same */ return(coeff); } case BarrelDistortion: case BarrelInverseDistortion: { /* Barrel Distortion Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd BarrelInv Distortion Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D) Where Rd is the normalized radius from corner to middle of image Input Arguments are one of the following forms (number of arguments)... 
3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input 
arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *DistortResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%  o columns: the number of columns in the resized image.
%
%  o rows: the number of rows in the resized image.
%
%  o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  /*
    Build the affine scaling as two control-point pairs (u,v,x,y each):
    (0,0)->(0,0) and (src_cols,src_rows)->(dst_cols,dst_rows), stored in
    distort_args for an AffineDistortion.
  */
  (void) ResetMagickMemory(distort_args,0,12*sizeof(double));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if ( tmp_image == (Image *) NULL )
    return((Image *) NULL);
  /* transparent virtual pixels so edges are not contaminated by VP color */
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL )
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate Virtual-Pixel alpha in the resized
        image, so only the actual original images alpha channel is used.

        distort alpha channel separately
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);

      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if ( tmp_image == (Image *) NULL )
        {
          /* bug fix: resize_alpha leaked on this error path before */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
        exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize images alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);

  /*
    Clean up the results of the Distortion: crop the distorted canvas back
    to exactly the requested columns x rows at origin.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;

  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D i s t o r t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortImage() distorts an image using various distortion methods, by % mapping color lookups of the source image to a new destination image % usually of the same size as the source image, unless 'bestfit' is set to % true. % % If 'bestfit' is enabled, and distortion allows it, the destination image is % adjusted to ensure the whole source 'image' will just fit within the final % destination image, which will be sized and offset accordingly. Also in % many cases the virtual offset of the source image will be taken into % account in the mapping. % % If the '-verbose' control option has been set, print to standard error the % equivalent '-fx' formula with coefficients for the function, if practical. % % The format of the DistortImage() method is: % % Image *DistortImage(const Image *image,const DistortMethod method, % const size_t number_arguments,const double *arguments, % MagickBooleanType bestfit, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion. % % ArcDistortion always ignores source image offset, and always % 'bestfit' the destination image with the top left corner offset % relative to the polar mapping center. % % Affine, Perspective, and Bilinear, do least squares fitting of the % distortion when more than the minimum number of control point pairs % are provided. % % Perspective, and Bilinear, fall back to an Affine distortion when less % than 4 control point pairs are provided. While Affine distortions % let you use any number of control point pairs, that is, zero pairs is % a No-Op (viewport only) distortion, one pair is a translation and % two pairs of control points do a scale-rotate-translate, without any % shearing. % % o number_arguments: the number of arguments given. % % o arguments: an array of floating point arguments for this method. 
% % o bestfit: Attempt to 'bestfit' the size of the resulting image. % This also forces the resulting image to be a 'layered' virtual % canvas image. Can be overridden using 'distort:viewport' setting. % % o exception: return any errors or warnings in this structure % % Extra Controls from Image meta-data (artifacts)... % % o "verbose" % Output to stderr alternatives, internal coefficients, and FX % equivalents for the distortion operation (if feasible). % This forms an extra check of the distortion method, and allows users % access to the internal constants IM calculates for the distortion. % % o "distort:viewport" % Directly set the output image canvas area and offset to use for the % resulting image, rather than use the original image's canvas, or a % calculated 'bestfit' canvas. % % o "distort:scale" % Scale the size of the output canvas by this amount to provide a % method of Zooming, and for super-sampling the results. % % Other settings that can affect results include % % o 'interpolate' For source image lookups (scale enlargements) % % o 'filter' Set filter to use for area-resampling (scale shrinking). 
% Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image, DistortMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Handle Special Compound Distortions */ if ( method == ResizeDistortion ) { if ( number_arguments != 2 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t)arguments[0], (size_t)arguments[1], exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff = GenerateCoefficients(image, &method, number_arguments, arguments, 0, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. 
*/ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; 
scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. 
*/ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { register ssize_t i; char image_gen[MagickPathExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g " "-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width, (double) geometry.height,(double) geometry.x,(double) geometry.y); lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */ } switch (method) { case AffineDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine Projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%lf,", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) 
FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case PerspectiveDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr, "Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i<4; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for (; i<7; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n", coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n", coeff[8] < 0 ? 
"<" : ">", lookup); break; } case BilinearForwardDistortion: (void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", 0.5-coeff[3], 0.5-coeff[7]); (void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if ( coeff[9] != 0 ) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n", -2*coeff[9], coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n", coeff[9]); } else (void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4], coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n", -coeff[1], coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr, " (rt < 0 ) ? 
red : %s'\n", lookup); else (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case BilinearReverseDistortion: #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n", coeff[0],(unsigned long) nterms); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n yy ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n"); for ( i=0; i<5; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, 
coeff[i]); (void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n", coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n", coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", -coeff[2], -coeff[3]); (void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1], coeff[7] ); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] ); (void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] ); (void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] ); (void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] ); (void) 
FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc,yc; /* NOTE: This does the barrel roll in pixel coords not image coords ** The internal distortion must do it in image coordinates, ** so that is what the center coeff (8,9) is given in. */ xc = ((double)image->columns-1.0)/2.0 + image->page.x; yc = ((double)image->rows-1.0)/2.0 + image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? 
"" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n", coeff[8]-0.5, coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[4],coeff[5],coeff[6],coeff[7]); (void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s", "-set option:distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. 
*/ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) return((Image *) NULL); /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse) { distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace,exception); if (distort_image->background_color.alpha_trait != UndefinedPixelTrait) distort_image->alpha_trait=BlendPixelTrait; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. */ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ResampleFilter **magick_restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetPixelInfo(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; PixelInfo pixel, /* pixel color to assign to distorted image */ invalid; /* the color to assign when distort result is invalid */ PointInfo d, s; /* transform destination image x,y to source image x,y */ register ssize_t i; register Quantum *magick_restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; 
/* Define constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'alpha_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. */ validity = 1.0; ConformPixelInfo(distort_image,&distort_image->alpha_color,&invalid, exception); for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 
0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward mapped needs reversed polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) 
*/ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ register ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... 
(see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5; if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 0 /*if ( i == 0 && j == 0 )*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... 
s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelViaPixelInfo(distort_image,&invalid,q); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel, exception); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelViaPixelInfo(distort_image,&pixel,q); } q+=GetPixelChannels(distort_image); } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DistortImage) #endif proceed=SetImageProgress(image,DistortImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. 
*/
  /* Arc distortion reports no page offset unless 'bestfit' was requested
     and no explicit viewport overrode the output geometry. */
  if ( method == ArcDistortion && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(distort_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o t a t e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  Empty triangles left
%  over from shearing the image are filled with the background color defined
%  by member 'background_color' of the image.  RotateImage allocates the
%  memory necessary for the new Image structure and returns a pointer to the
%  new image.
%
%  The format of the RotateImage method is:
%
%      Image *RotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: fold 'degrees' into a residual angle in (-45,45]
    plus a count of whole 90-degree turns, so that exact multiples of 90
    degrees can use the lossless IntegralRotateImage() fast path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=degrees;
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Compute the equivalent X/Y shear factors for the residual angle; if both
    are negligible the rotation is a pure multiple of 90 degrees and needs no
    resampling at all.
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General angle: delegate to DistortImage() with a Scale-Rotate-Translate
    distortion.  Note the ORIGINAL (unnormalized) 'degrees' is passed; the
    normalization above was only used for the fast-path decision.  A clone is
    made so the virtual-pixel method can be set without touching 'image'.
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p a r s e C o l o r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SparseColorImage(), given a set of coordinates, interpolates the colors
%  found at those coordinates, across the whole image, using various methods.
%
%  The format of the SparseColorImage() method is:
%
%      Image *SparseColorImage(const Image *image,
%        const SparseColorMethod method,const size_t number_arguments,
%        const double *arguments,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be filled in.
%
%    o method: the method to fill in the gradient between the control points.
%
%      The methods used for SparseColor() are often similar to methods
%      used for DistortImage(), and even share the same code for determination
%      of the function coefficients, though with more dimensions (or resulting
%      values).
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: array of floating point arguments for this method--
%        x,y,color_values-- with color_values given as normalized values.
%
%    o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const SparseColorMethod method,const size_t number_arguments,
  const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Determine number of color values needed per control point: one per channel
    whose traits request an update (K only in CMYK, alpha only if enabled).
    This fixes the stride of the 'arguments' array: x,y then number_colors
    normalized color values per control point.
  */
  number_colors=0;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    number_colors++;
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case we are
    mapping (distorting) colors, rather than coordinates.
  */
  {
    DistortMethod
      distort_method;

    distort_method=(DistortMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method.  This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method;   /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5;             /* sqrt() the squared distance for inverse */
  }

  /* Verbose output: emit an equivalent -fx expression per updated channel */
  if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }

  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status) \
      magick_threads(image,sparse_image,sparse_image->rows,1)
#endif
    /* Each row is processed independently; 'status'/'progress' are the only
       shared state and are updated under the critical section below. */
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;    /* pixel to assign to distorted image */

      register ssize_t
        i;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(sparse_image,&pixel);
      /* NOTE(review): the loop bound and GetPixelInfoPixel() use 'image'
         while 'q' walks 'sparse_image'; the two are clones so the geometry
         and channel layout match -- confirm if CloneImage semantics change. */
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        GetPixelInfoPixel(image,q,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            register ssize_t x=0;
            /* Affine plane per channel: value = a*i + b*j + c */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            register ssize_t x=0;
            /* Bilinear surface per channel: a*i + b*j + c*i*j + d */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j +
                  coeff[x+3], x+=4;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j +
                  coeff[x+3], x+=4;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j +
                  coeff[x+3], x+=4;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j +
                  coeff[x+3], x+=4;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j +
                  coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;

            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red=0.0;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green=0.0;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue=0.0;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black=0.0;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha=0.0;
            denominator = 0.0;
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              /* clamp so a pixel exactly on a control point does not get an
                 infinite weight (weight < 1 would otherwise blow up) */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                pixel.red += arguments[x++]*weight;
              if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                pixel.green += arguments[x++]*weight;
              if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                pixel.blue += arguments[x++]*weight;
              if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->colorspace == CMYKColorspace))
                pixel.black += arguments[x++]*weight;
              if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->alpha_trait != UndefinedPixelTrait))
                pixel.alpha += arguments[x++]*weight;
              denominator += weight;
            }
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red/=denominator;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green/=denominator;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue/=denominator;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black/=denominator;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha/=denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /* Just use the closest control point you can find!
               (distance measured with the L1 / taxicab metric) */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /* Just use the closest control point you can find!
               (squared Euclidean distance -- no sqrt needed for comparison) */
            for (k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image
           (interpolated values are normalized [0,1]; scale to QuantumRange
           and clamp before storing) */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          pixel.red=ClampPixel(QuantumRange*pixel.red);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          pixel.green=ClampPixel(QuantumRange*pixel.green);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          pixel.blue=ClampPixel(QuantumRange*pixel.blue);
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          pixel.black=ClampPixel(QuantumRange*pixel.black);
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          pixel.alpha=ClampPixel(QuantumRange*pixel.alpha);
        SetPixelViaPixelInfo(sparse_image,&pixel,q);
        q+=GetPixelChannels(sparse_image);
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
test6.c
void foo () { 0; l1: #pragma omp barrier 1; } int main() { #pragma omp parallel { 2; if (3) { 4; foo (); 5; } else { 6; l2: #pragma omp barrier 7; } if (8) { 9; foo(); 10; } else { 11; l3: #pragma omp barrier 12; } 13; } }
GB_binop__bxnor_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxnor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxnor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxnor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxnor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxnor_uint16)
// A*D function (colscale):         GB (_AxD__bxnor_uint16)
// D*A function (rowscale):         GB (_DxB__bxnor_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__bxnor_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxnor_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxnor_uint16)
// C=scalar+B                       GB (_bind1st__bxnor_uint16)
// C=scalar+B'                      GB (_bind1st_tran__bxnor_uint16)
// C=A+scalar                       GB (_bind2nd__bxnor_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__bxnor_uint16)

// C type:   uint16_t
// A type:   uint16_t
// A pattern? 0
// B type:   uint16_t
// B pattern? 0

// BinaryOp: cij = ~((aij) ^ (bij))

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ~((x) ^ (y)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXNOR || GxB_NO_UINT16 || GxB_NO_BXNOR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (not applicable to BXNOR, so this kernel is compiled out)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxnor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- generator emits a second return after the
    // block above; harmless, kept to match the template generator output.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxnor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // alpha/beta are only read for eWiseUnion, not eWiseAdd
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxnor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxnor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true for full)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = ~((x) ^ (bij)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true for full)
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = ~((aij) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ~((x) ^ (aij)) ;                  \
}

GrB_Info GB (_bind1st_tran__bxnor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB (_bind2nd_tran__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
LU_omp.c
/********************************************************************** Modify the code-Add OpenMP directives to parallelize the LU kernel ***********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> #include "utils.h" int main(int argc, char * argv[]){ int i,j,k; double l; double total_time; struct timeval ts,tf; int X = atoi(argv[1]); int Y = X; double **A=malloc2D(X,Y); double *Ak, *Ai; // print2D(A, X, Y); init2D(A, X, Y); gettimeofday(&ts,NULL); /* Theloume oso ginetai, na kanoume liges anafores sthn koinh mnhmh.. * Opote kratame tis ekastote grammes k kai i me 3exwristous private * deiktes Ak kai Ai antistoixa */ for (k=0; k<X-1; k++) { // for k #pragma omp parallel private(Ak) { // pragma Ak = A[k]; #pragma omp for schedule(static) private(l, j, Ai) for (i=k+1; i<X; i++) { // for i Ai = A[i]; l=Ai[k]/Ak[k]; for (j=k; j<Y; j++) Ai[j]-=l*Ak[j]; } // for i } // pragma } // for k gettimeofday(&tf,NULL); total_time=(tf.tv_sec-ts.tv_sec)+(tf.tv_usec-ts.tv_usec)*0.000001; printf("LU-OpenMP\t%d\t%.3lf\n",X,total_time); char * filename="output_omp"; print2DFile(A,X,Y,filename); free2D(A, X, Y); return 0; }
Clustering.h
//
// Copyright (C) 2015-2020 Yahoo Japan Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#pragma once

#include "NGT/Index.h"
#include "defines.h"

using namespace std;

// Select the SIMD path for sumOfSquares(): AVX2 when available and not
// explicitly disabled, otherwise a plain scalar fallback.
#if defined(NGT_AVX_DISABLED)
#define NGT_CLUSTER_NO_AVX
#else
#if defined(__AVX2__)
#define NGT_CLUSTER_AVX2
#else
#define NGT_CLUSTER_NO_AVX
#endif
#endif

#if defined(NGT_CLUSTER_NO_AVX)
// #warning "*** SIMD is *NOT* available! ***"
#else
#include <immintrin.h>
#endif

#include <omp.h>
#include <random>

namespace NGT {

// K-means clustering, either brute-force ("WithoutNGT") or accelerated by
// approximate nearest-neighbor search through an NGT index ("WithNGT").
class Clustering {
 public:
  enum InitializationMode {
    InitializationModeHead = 0,            // first k vectors become centroids
    InitializationModeRandom = 1,          // random vectors become centroids
    InitializationModeKmeansPlusPlus = 2   // k-means++ seeding
  };

  enum ClusteringType {
    ClusteringTypeKmeansWithNGT = 0,
    ClusteringTypeKmeansWithoutNGT = 1,
    ClusteringTypeKmeansWithIteration = 2,
    ClusteringTypeKmeansWithNGTForCentroids = 3
  };

  // One (vector, centroid, distance) assignment record.
  class Entry {
   public:
    Entry() : vectorID(0), centroidID(0), distance(0.0) {}
    Entry(size_t vid, size_t cid, double d) : vectorID(vid), centroidID(cid), distance(d) {}
    // Note: intentionally reversed — sorting Entries yields *descending* distance.
    bool operator<(const Entry &e) const { return distance > e.distance; }
    uint32_t vectorID;
    uint32_t centroidID;
    double distance;
  };

  // Like Entry but with the natural (ascending) ordering.
  class DescendingEntry {
   public:
    DescendingEntry(size_t vid, double d) : vectorID(vid), distance(d) {}
    bool operator<(const DescendingEntry &e) const { return distance < e.distance; }
    size_t vectorID;
    double distance;
  };

  // A centroid plus the entries currently assigned to it.
  class Cluster {
   public:
    Cluster(std::vector<float> &c) : centroid(c), radius(0.0) {}
    Cluster(const Cluster &c) { *this = c; }
    Cluster &operator=(const Cluster &c) {
      members = c.members;
      centroid = c.centroid;
      radius = c.radius;
      return *this;
    }

    std::vector<Entry> members;
    std::vector<float> centroid;
    double radius;
  };

  Clustering(InitializationMode im = InitializationModeHead, ClusteringType ct = ClusteringTypeKmeansWithNGT,
             size_t mi = 100)
      : clusteringType(ct), initializationMode(im), maximumIteration(mi) {
    initialize();
  }

  // Sets the epsilon schedule and result-size coefficient only.
  // NOTE(review): clusterSizeConstraint and numberOfClusters are NOT
  // initialized here or in the constructor — confirm callers set them
  // before kmeansWithoutNGT() reads clusterSizeConstraint.
  void initialize() {
    epsilonFrom = 0.12;
    epsilonTo = epsilonFrom;
    epsilonStep = 0.04;
    resultSizeCoefficient = 5;
  }

  // Parse a list of numeric strings into floats (replaces the vector's content).
  static void convert(std::vector<std::string> &strings, std::vector<float> &vector) {
    vector.clear();
    for (auto it = strings.begin(); it != strings.end(); ++it) {
      vector.push_back(stod(*it));
    }
  }

  // Split a whitespace-delimited line into a float vector.
  static void extractVector(const std::string &str, std::vector<float> &vec) {
    std::vector<std::string> tokens;
    NGT::Common::tokenize(str, tokens, " \t");
    convert(tokens, vec);
  }

  // Load one vector per line from a text file; throws on open failure.
  static void loadVectors(const std::string &file, std::vector<std::vector<float> > &vectors) {
    std::ifstream is(file);
    if (!is) {
      throw std::runtime_error("loadVectors::Cannot open " + file);
    }
    std::string line;
    while (getline(is, line)) {
      std::vector<float> v;
      extractVector(line, v);
      vectors.push_back(v);
    }
  }

  // Write vectors tab-separated, one per line, 9 significant digits.
  static void saveVectors(const std::string &file, std::vector<std::vector<float> > &vectors) {
    std::ofstream os(file);
    for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) {
      std::vector<float> &v = *vit;
      for (auto it = v.begin(); it != v.end(); ++it) {
        os << std::setprecision(9) << (*it);
        if (it + 1 != v.end()) {
          os << "\t";
        }
      }
      os << std::endl;
    }
  }

  // Write one size_t per line.
  static void saveVector(const std::string &file, std::vector<size_t> &vectors) {
    std::ofstream os(file);
    for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) {
      os << *vit << std::endl;
    }
  }

  // Load centroids (one per line); exits the process if fewer than
  // numberOfClusters lines are available.
  static void loadClusters(const std::string &file, std::vector<Cluster> &clusters, size_t numberOfClusters = 0) {
    std::ifstream is(file);
    if (!is) {
      throw std::runtime_error("loadClusters::Cannot open " + file);
    }
    std::string line;
    while (getline(is, line)) {
      std::vector<float> v;
      extractVector(line, v);
      clusters.push_back(v);
      if ((numberOfClusters != 0) && (clusters.size() >= numberOfClusters)) {
        break;
      }
    }
    if ((numberOfClusters != 0) && (clusters.size() < numberOfClusters)) {
      // std::cerr << "initial cluster data are not enough. " << clusters.size() << ":" << numberOfClusters
      //           << std::endl;
      if (NGT_LOG_DEBUG_)
        (*NGT_LOG_DEBUG_)("initial cluster data are not enough. " + std::to_string(clusters.size()) + ":" +
                          std::to_string(numberOfClusters));
      exit(1);
    }
  }

#if !defined(NGT_CLUSTER_NO_AVX)
  // Sum of squared differences over two float arrays, AVX (8-wide) with a
  // scalar tail for the remaining < 8 elements.
  static double sumOfSquares(float *a, float *b, size_t size) {
    __m256 sum = _mm256_setzero_ps();
    float *last = a + size;
    float *lastgroup = last - 7;
    while (a < lastgroup) {
      __m256 v = _mm256_sub_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b));
      sum = _mm256_add_ps(sum, _mm256_mul_ps(v, v));
      a += 8;
      b += 8;
    }
    __attribute__((aligned(32))) float f[8];
    _mm256_store_ps(f, sum);
    double s = f[0] + f[1] + f[2] + f[3] + f[4] + f[5] + f[6] + f[7];
    while (a < last) {
      double d = *a++ - *b++;
      s += d * d;
    }
    return s;
  }
#else   // !defined(NGT_AVX_DISABLED) && defined(__AVX__)
  // Scalar fallback of the same computation.
  static double sumOfSquares(float *a, float *b, size_t size) {
    double csum = 0.0;
    float *x = a;
    float *y = b;
    for (size_t i = 0; i < size; i++) {
      double d = (double)*x++ - (double)*y++;
      csum += d * d;
    }
    return csum;
  }
#endif  // !defined(NGT_AVX_DISABLED) && defined(__AVX__)

  // Euclidean (L2) distance between two vectors.
  // NOTE(review): assumes vector1.size() == vector2.size() — not checked here.
  static double distanceL2(std::vector<float> &vector1, std::vector<float> &vector2) {
    return sqrt(sumOfSquares(&vector1[0], &vector2[0], vector1.size()));
  }

  // Mean of pairwise L2 distances between two equal-length vector sets.
  static double distanceL2(std::vector<std::vector<float> > &vector1, std::vector<std::vector<float> > &vector2) {
    assert(vector1.size() == vector2.size());
    double distance = 0.0;
    for (size_t i = 0; i < vector1.size(); i++) {
      distance += distanceL2(vector1[i], vector2[i]);
    }
    distance /= (double)vector1.size();
    return distance;
  }

  // Mean squared per-dimension difference.
  static double meanSumOfSquares(std::vector<float> &vector1, std::vector<float> &vector2) {
    return sumOfSquares(&vector1[0], &vector2[0], vector1.size()) / (double)vector1.size();
  }

  // a -= b, element-wise, in place.
  static void subtract(std::vector<float> &a, std::vector<float> &b) {
    assert(a.size() == b.size());
    auto bit = b.begin();
    for (auto ait = a.begin(); ait != a.end(); ++ait, ++bit) {
      *ait = *ait - *bit;
    }
  }

  // Seed the clusters with the first `size` vectors.
  static void getInitialCentroidsFromHead(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters,
                                          size_t size) {
    size = size > vectors.size() ? vectors.size() : size;
    clusters.clear();
    for (size_t i = 0; i < size; i++) {
      clusters.push_back(Cluster(vectors[i]));
    }
  }

  // Seed the clusters with randomly chosen vectors (seed==0 => random_device).
  static void getInitialCentroidsRandomly(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters,
                                          size_t size, size_t seed) {
    clusters.clear();
    std::random_device rnd;
    if (seed == 0) {
      seed = rnd();
    }
    std::mt19937 mt(seed);
    for (size_t i = 0; i < size; i++) {
      size_t idx = mt() * vectors.size() / mt.max();
      // NOTE(review): rejects idx >= size, not idx >= vectors.size() — so only
      // the first `size` vectors can ever be picked; confirm this is intended.
      if (idx >= size) {
        i--;
        continue;
      }
      clusters.push_back(Cluster(vectors[idx]));
    }
    assert(clusters.size() == size);
  }

  // k-means++ seeding: pick each next centroid with probability proportional
  // to its squared distance to the nearest existing centroid.
  static void getInitialCentroidsKmeansPlusPlus(std::vector<std::vector<float> > &vectors,
                                                std::vector<Cluster> &clusters, size_t size) {
    size = size > vectors.size() ? vectors.size() : size;
    clusters.clear();
    std::random_device rnd;
    std::mt19937 mt(rnd());
    size_t idx = (long long)mt() * (long long)vectors.size() / (long long)mt.max();
    clusters.push_back(Cluster(vectors[idx]));

    NGT::Timer timer;
    for (size_t k = 1; k < size; k++) {
      double sum = 0;
      std::priority_queue<DescendingEntry> sortedObjects;
      // get d^2 and sort
#pragma omp parallel for
      for (size_t vi = 0; vi < vectors.size(); vi++) {
        auto vit = vectors.begin() + vi;
        double mind = DBL_MAX;
        for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
          double d = distanceL2(*vit, (*cit).centroid);
          d *= d;
          if (d < mind) {
            mind = d;
          }
        }
        // the queue and `sum` are shared — serialize updates
#pragma omp critical
        {
          sortedObjects.push(DescendingEntry(distance(vectors.begin(), vit), mind));
          sum += mind;
        }
      }
      // Draw a point in [0, sum) and walk down the queue until the running
      // remainder falls below it.
      double l = (double)mt() / (double)mt.max() * sum;
      while (!sortedObjects.empty()) {
        sum -= sortedObjects.top().distance;
        if (l >= sum) {
          clusters.push_back(Cluster(vectors[sortedObjects.top().vectorID]));
          break;
        }
        sortedObjects.pop();
      }
    }
  }

  // Assign every vector to its nearest centroid, optionally respecting a
  // per-cluster capacity (clusterSize); overflowing entries are re-routed to
  // the nearest non-full cluster.
  static void assign(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters,
                     size_t clusterSize = std::numeric_limits<size_t>::max()) {
    // compute distances to the nearest clusters, and construct heap by the distances.
    NGT::Timer timer;
    timer.start();
    std::vector<Entry> sortedObjects(vectors.size());
#pragma omp parallel for
    for (size_t vi = 0; vi < vectors.size(); vi++) {
      auto vit = vectors.begin() + vi;
      {
        double mind = DBL_MAX;
        size_t mincidx = -1;
        for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
          double d = distanceL2(*vit, (*cit).centroid);
          if (d < mind) {
            mind = d;
            mincidx = distance(clusters.begin(), cit);
          }
        }
        sortedObjects[vi] = Entry(vi, mincidx, mind);
      }
    }
    // Entry::operator< is reversed, so this sorts by descending distance;
    // the reverse iteration below therefore visits closest objects first.
    std::sort(sortedObjects.begin(), sortedObjects.end());
    // clear
    for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
      (*cit).members.clear();
    }
    // distribute objects to the nearest clusters in the same size constraint.
    for (auto soi = sortedObjects.rbegin(); soi != sortedObjects.rend();) {
      Entry &entry = *soi;
      if (entry.centroidID >= clusters.size()) {
        // std::cerr << "Something wrong. " << entry.centroidID << ":" << clusters.size() << std::endl;
        if (NGT_LOG_DEBUG_)
          (*NGT_LOG_DEBUG_)("Something wrong. " + std::to_string(entry.centroidID) + ":" +
                            std::to_string(clusters.size()));
        soi++;
        continue;
      }
      if (clusters[entry.centroidID].members.size() < clusterSize) {
        clusters[entry.centroidID].members.push_back(entry);
        soi++;
      } else {
        // the preferred cluster is full: retarget to the nearest cluster with
        // free capacity, then re-sort the unprocessed prefix and resume at the
        // same reverse position.
        double mind = DBL_MAX;
        size_t mincidx = -1;
        for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
          if ((*cit).members.size() >= clusterSize) {
            continue;
          }
          double d = distanceL2(vectors[entry.vectorID], (*cit).centroid);
          if (d < mind) {
            mind = d;
            mincidx = distance(clusters.begin(), cit);
          }
        }
        entry = Entry(entry.vectorID, mincidx, mind);
        int pt = distance(sortedObjects.rbegin(), soi);
        std::sort(sortedObjects.begin(), soi.base());
        soi = sortedObjects.rbegin() + pt;
        assert(pt == distance(sortedObjects.rbegin(), soi));
      }
    }
    moveFartherObjectsToEmptyClusters(clusters);
  }

  // Give each empty cluster the farthest member stolen from the cluster that
  // currently has the farthest last member (and at least 2 members).
  static void moveFartherObjectsToEmptyClusters(std::vector<Cluster> &clusters) {
    size_t emptyClusterCount = 0;
    for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
      if ((*cit).members.size() == 0) {
        emptyClusterCount++;
        double max = 0.0;
        auto maxit = clusters.begin();
        for (auto scit = clusters.begin(); scit != clusters.end(); ++scit) {
          if ((*scit).members.size() >= 2 && (*scit).members.back().distance > max) {
            maxit = scit;
            max = (*scit).members.back().distance;
          }
        }
        (*cit).members.push_back((*maxit).members.back());
        (*cit).members.back().centroidID = distance(clusters.begin(), cit);
        (*maxit).members.pop_back();
      }
    }
    // recount (result currently unused beyond the local variable)
    emptyClusterCount = 0;
    for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
      if ((*cit).members.size() == 0) {
        emptyClusterCount++;
      }
    }
  }

  // Assignment step driven by ANN search: each centroid queries the NGT index
  // for nearby objects; objects missed by every search are assigned by brute
  // force afterwards.
  static void assignWithNGT(NGT::Index &index, std::vector<std::vector<float> > &vectors,
                            std::vector<Cluster> &clusters, float &radius, size_t &resultSize, float epsilon = 0.12,
                            size_t notRetrievedObjectCount = 0) {
    size_t dataSize = vectors.size();
    // the NGT object repository is 1-origin, hence the -1
    assert(index.getObjectRepositorySize() - 1 == vectors.size());
    vector<vector<Entry> > results(clusters.size());
#pragma omp parallel for
    for (size_t ci = 0; ci < clusters.size(); ci++) {
      auto cit = clusters.begin() + ci;
      NGT::ObjectDistances objects;  // result set
      NGT::Object *query = 0;
      query = index.allocateObject((*cit).centroid);
      // set search prameters.
      NGT::SearchContainer sc(*query);  // search parametera container.
      sc.setResults(&objects);          // set the result set.
      sc.setEpsilon(epsilon);           // set exploration coefficient.
      if (radius > 0.0) {
        sc.setRadius(radius);
        sc.setSize(dataSize / 2);
      } else {
        sc.setSize(resultSize);  // the number of resultant objects.
      }
      index.search(sc);
      results[ci].reserve(objects.size());
      for (size_t idx = 0; idx < objects.size(); idx++) {
        size_t oidx = objects[idx].id - 1;  // back to 0-origin vector index
        results[ci].push_back(Entry(oidx, ci, objects[idx].distance));
      }
      index.deleteObject(query);
    }
    size_t resultCount = 0;
    for (auto ri = results.begin(); ri != results.end(); ++ri) {
      resultCount += (*ri).size();
    }
    // flatten per-centroid results into one list
    vector<Entry> sortedResults;
    sortedResults.reserve(resultCount);
    for (auto ri = results.begin(); ri != results.end(); ++ri) {
      auto end = (*ri).begin();
      for (; end != (*ri).end(); ++end) {
      }
      std::copy((*ri).begin(), end, std::back_inserter(sortedResults));
    }
    // mark objects that appeared in at least one search result
    vector<bool> processedObjects(dataSize, false);
    for (auto i = sortedResults.begin(); i != sortedResults.end(); ++i) {
      processedObjects[(*i).vectorID] = true;
    }
    notRetrievedObjectCount = 0;
    vector<uint32_t> notRetrievedObjectIDs;
    for (size_t idx = 0; idx < dataSize; idx++) {
      if (!processedObjects[idx]) {
        notRetrievedObjectCount++;
        notRetrievedObjectIDs.push_back(idx);
      }
    }
    sort(sortedResults.begin(), sortedResults.end());
    for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
      (*cit).members.clear();
    }
    // reverse iteration => closest (vector, centroid) pairs win first;
    // each object is consumed only once.
    for (auto i = sortedResults.rbegin(); i != sortedResults.rend(); ++i) {
      size_t objectID = (*i).vectorID;
      size_t clusterID = (*i).centroidID;
      if (processedObjects[objectID]) {
        processedObjects[objectID] = false;
        clusters[clusterID].members.push_back(*i);
        clusters[clusterID].members.back().centroidID = clusterID;
        radius = (*i).distance;
      }
    }
    // brute-force assignment for objects that no search retrieved
    vector<Entry> notRetrievedObjects(notRetrievedObjectIDs.size());
#pragma omp parallel for
    for (size_t vi = 0; vi < notRetrievedObjectIDs.size(); vi++) {
      auto vit = notRetrievedObjectIDs.begin() + vi;
      {
        double mind = DBL_MAX;
        size_t mincidx = -1;
        for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
          double d = distanceL2(vectors[*vit], (*cit).centroid);
          if (d < mind) {
            mind = d;
            mincidx = distance(clusters.begin(), cit);
          }
        }
        notRetrievedObjects[vi] = Entry(*vit, mincidx, mind);  // Entry(vectorID, centroidID, distance)
      }
    }
    sort(notRetrievedObjects.begin(), notRetrievedObjects.end());
    for (auto nroit = notRetrievedObjects.begin(); nroit != notRetrievedObjects.end(); ++nroit) {
      clusters[(*nroit).centroidID].members.push_back(*nroit);
    }
    moveFartherObjectsToEmptyClusters(clusters);
  }

  // Recompute each centroid as the mean of its members; returns the summed L2
  // movement of all centroids (0 => converged). Aborts on an empty cluster.
  static double calculateCentroid(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) {
    double distance = 0;
    size_t memberCount = 0;
    for (auto it = clusters.begin(); it != clusters.end(); ++it) {
      memberCount += (*it).members.size();
      if ((*it).members.size() != 0) {
        std::vector<float> mean(vectors[0].size(), 0.0);
        for (auto memit = (*it).members.begin(); memit != (*it).members.end(); ++memit) {
          auto mit = mean.begin();
          auto &v = vectors[(*memit).vectorID];
          for (auto vit = v.begin(); vit != v.end(); ++vit, ++mit) {
            *mit += *vit;
          }
        }
        for (auto mit = mean.begin(); mit != mean.end(); ++mit) {
          *mit /= (*it).members.size();
        }
        distance += distanceL2((*it).centroid, mean);
        (*it).centroid = mean;
      } else {
        // cerr << "Clustering: Fatal Error. No member!" << endl;
        if (NGT_LOG_DEBUG_) (*NGT_LOG_DEBUG_)("Clustering: Fatal Error. No member!");
        abort();
      }
    }
    return distance;
  }

  // Write the centroids tab-separated, one per line.
  static void saveClusters(const std::string &file, std::vector<Cluster> &clusters) {
    std::ofstream os(file);
    for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
      std::vector<float> &v = (*cit).centroid;
      for (auto it = v.begin(); it != v.end(); ++it) {
        os << std::setprecision(9) << (*it);
        if (it + 1 != v.end()) {
          os << "\t";
        }
      }
      os << std::endl;
    }
  }

  // Plain Lloyd's k-means.
  // NOTE(review): returns `diff == 0` (a convergence flag) through a double
  // return type — callers treating this as a residual will be misled.
  double kmeansWithoutNGT(std::vector<std::vector<float> > &vectors, size_t numberOfClusters,
                          std::vector<Cluster> &clusters) {
    size_t clusterSize = std::numeric_limits<size_t>::max();
    if (clusterSizeConstraint) {
      clusterSize = ceil((double)vectors.size() / (double)numberOfClusters);
    }
    double diff = 0;
    for (size_t i = 0; i < maximumIteration; i++) {
      // std::cerr << "iteration=" << i << std::endl;
      if (NGT_LOG_DEBUG_) (*NGT_LOG_DEBUG_)("iteration=" + std::to_string(i));
      assign(vectors, clusters, clusterSize);
      // centroid is recomputed.
      // diff is distance between the current centroids and the previous centroids.
      diff = calculateCentroid(vectors, clusters);
      if (diff == 0) {
        break;
      }
    }
    return diff == 0;
  }

  // NGT-accelerated k-means loop over an already-built index.
  double kmeansWithNGT(NGT::Index &index, std::vector<std::vector<float> > &vectors, size_t numberOfClusters,
                       std::vector<Cluster> &clusters, float epsilon) {
    diffHistory.clear();
    NGT::Timer timer;
    timer.start();
    float radius;
    double diff = 0.0;
    size_t resultSize;
    resultSize = resultSizeCoefficient * vectors.size() / clusters.size();
    for (size_t i = 0; i < maximumIteration; i++) {
      size_t notRetrievedObjectCount = 0;
      radius = -1.0;  // negative radius => size-bounded search in assignWithNGT
      assignWithNGT(index, vectors, clusters, radius, resultSize, epsilon, notRetrievedObjectCount);
      // centroid is recomputed.
      // diff is distance between the current centroids and the previous centroids.
      std::vector<Cluster> prevClusters = clusters;
      diff = calculateCentroid(vectors, clusters);
      timer.stop();
      // std::cerr << "iteration=" << i << " time=" << timer << " diff=" << diff << std::endl;
      if (NGT_LOG_DEBUG_)
        (*NGT_LOG_DEBUG_)("iteration=" + std::to_string(i) + " time=" + std::to_string(timer.time) +
                          " diff=" + std::to_string(diff));
      timer.start();
      diffHistory.push_back(diff);
      if (diff == 0) {
        break;
      }
    }
    return diff;
  }

  // Build a temporary on-disk NGT index ("cluster-ngt.<pid>") from the raw
  // vectors, then run the NGT-accelerated k-means on it.
  double kmeansWithNGT(std::vector<std::vector<float> > &vectors, size_t numberOfClusters,
                       std::vector<Cluster> &clusters) {
    pid_t pid = getpid();
    std::stringstream str;
    str << "cluster-ngt." << pid;
    string database = str.str();
    string dataFile;
    size_t dataSize = 0;
    size_t dim = clusters.front().centroid.size();
    NGT::Property property;
    property.dimension = dim;
    property.graphType = NGT::Property::GraphType::GraphTypeANNG;
    property.objectType = NGT::Index::Property::ObjectType::Float;
    property.distanceType = NGT::Index::Property::DistanceType::DistanceTypeL2;
    NGT::Index::createGraphAndTree(database, property, dataFile, dataSize);
    float *data = new float[vectors.size() * dim];
    float *ptr = data;
    dataSize = vectors.size();
    for (auto vi = vectors.begin(); vi != vectors.end(); ++vi) {
      memcpy(ptr, &((*vi)[0]), dim * sizeof(float));
      ptr += dim;
    }
    size_t threadSize = 20;
    NGT::Index::append(database, data, dataSize, threadSize);
    delete[] data;
    NGT::Index index(database);
    return kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilonFrom);
  }

  // Cluster the objects already stored in an NGT index, sweeping epsilon from
  // epsilonFrom to epsilonTo until convergence.
  double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters, std::vector<Cluster> &clusters) {
    NGT::GraphIndex &graph = static_cast<NGT::GraphIndex &>(index.getIndex());
    NGT::ObjectSpace &os = graph.getObjectSpace();
    size_t size = os.getRepository().size();
    std::vector<std::vector<float> > vectors(size - 1);  // repository is 1-origin
    for (size_t idx = 1; idx < size; idx++) {
      try {
        os.getObject(idx, vectors[idx - 1]);
      } catch (...) {
        // cerr << "Cannot get object " << idx << endl;
        if (NGT_LOG_DEBUG_) (*NGT_LOG_DEBUG_)("Cannot get object " + std::to_string(idx));
      }
    }
    // cerr << "# of data for clustering=" << vectors.size() << endl;
    if (NGT_LOG_DEBUG_) (*NGT_LOG_DEBUG_)("# of data for clustering=" + std::to_string(vectors.size()));
    double diff = DBL_MAX;
    clusters.clear();
    setupInitialClusters(vectors, numberOfClusters, clusters);
    for (float epsilon = epsilonFrom; epsilon <= epsilonTo; epsilon += epsilonStep) {
      // cerr << "epsilon=" << epsilon << endl;
      if (NGT_LOG_DEBUG_) (*NGT_LOG_DEBUG_)("epsilon=" + std::to_string(epsilon));
      diff = kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilon);
      if (diff == 0.0) {
        return diff;
      }
    }
    return diff;
  }

  // Cluster `index` and insert the resulting centroids into `outIndex`.
  double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters, NGT::Index &outIndex) {
    std::vector<Cluster> clusters;
    double diff = kmeansWithNGT(index, numberOfClusters, clusters);
    for (auto i = clusters.begin(); i != clusters.end(); ++i) {
      outIndex.insert((*i).centroid);
    }
    outIndex.createIndex(16);
    return diff;
  }

  // In-place variant: renames the index directory to "<path>.tmp", rebuilds a
  // fresh index at the original path, fills it with centroids, then destroys
  // the temporary copy.
  double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters) {
    NGT::Property prop;
    index.getProperty(prop);
    string path = index.getPath();
    index.save();
    index.close();
    string outIndexName = path;
    string inIndexName = path + ".tmp";
    std::rename(outIndexName.c_str(), inIndexName.c_str());
    NGT::Index::createGraphAndTree(outIndexName, prop);
    index.open(outIndexName);
    NGT::Index inIndex(inIndexName);
    double diff = kmeansWithNGT(inIndex, numberOfClusters, index);
    inIndex.close();
    NGT::Index::destroy(inIndexName);
    return diff;
  }

  // Convenience wrapper: open by name, cluster in place, save.
  double kmeansWithNGT(string &indexName, size_t numberOfClusters) {
    NGT::Index inIndex(indexName);
    double diff = kmeansWithNGT(inIndex, numberOfClusters);
    inIndex.save();
    inIndex.close();
    return diff;
  }

  // Mean (over all vectors) of the mean squared error to the assigned centroid.
  static double calculateMSE(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) {
    double mse = 0.0;
    size_t count = 0;
    for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
      count += (*cit).members.size();
      for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) {
        mse += meanSumOfSquares((*cit).centroid, vectors[(*mit).vectorID]);
      }
    }
    assert(vectors.size() == count);
    return mse / (double)vectors.size();
  }

  // Mean L2 distance of every vector to its assigned centroid.
  static double calculateML2(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) {
    double d = 0.0;
    size_t count = 0;
    for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
      count += (*cit).members.size();
      double localD = 0.0;
      for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) {
        double distance = distanceL2((*cit).centroid, vectors[(*mit).vectorID]);
        d += distance;
        localD += distance;
      }
    }
    if (vectors.size() != count) {
      // std::cerr << "Warning! vectors.size() != count" << std::endl;
      if (NGT_LOG_DEBUG_) (*NGT_LOG_DEBUG_)("Warning! vectors.size() != count");
    }
    return d / (double)vectors.size();
  }

  // Same as calculateML2 but restricted to the listed centroid IDs; note the
  // divisor is still the total vector count.
  static double calculateML2FromSpecifiedCentroids(std::vector<std::vector<float> > &vectors,
                                                   std::vector<Cluster> &clusters, std::vector<size_t> &centroidIds) {
    double d = 0.0;
    size_t count = 0;
    for (auto it = centroidIds.begin(); it != centroidIds.end(); ++it) {
      Cluster &cluster = clusters[(*it)];
      count += cluster.members.size();
      for (auto mit = cluster.members.begin(); mit != cluster.members.end(); ++mit) {
        d += distanceL2(cluster.centroid, vectors[(*mit).vectorID]);
      }
    }
    return d / (double)vectors.size();
  }

  // Seed centroids per initializationMode; no-op when clusters were preloaded.
  void setupInitialClusters(std::vector<std::vector<float> > &vectors, size_t numberOfClusters,
                            std::vector<Cluster> &clusters) {
    if (clusters.empty()) {
      switch (initializationMode) {
      case InitializationModeHead: {
        getInitialCentroidsFromHead(vectors, clusters, numberOfClusters);
        break;
      }
      case InitializationModeRandom: {
        getInitialCentroidsRandomly(vectors, clusters, numberOfClusters, 0);
        break;
      }
      case InitializationModeKmeansPlusPlus: {
        getInitialCentroidsKmeansPlusPlus(vectors, clusters, numberOfClusters);
        break;
      }
      default:
        // std::cerr << "proper initMode is not specified." << std::endl;
        if (NGT_LOG_DEBUG_) (*NGT_LOG_DEBUG_)("proper initMode is not specified.");
        exit(1);
      }
    }
  }

  // Entry point: seed then dispatch on clusteringType.
  bool kmeans(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) {
    setupInitialClusters(vectors, numberOfClusters, clusters);
    switch (clusteringType) {
    case ClusteringTypeKmeansWithoutNGT:
      return kmeansWithoutNGT(vectors, numberOfClusters, clusters);
      break;
    case ClusteringTypeKmeansWithNGT:
      return kmeansWithNGT(vectors, numberOfClusters, clusters);
      break;
    default:
      // cerr << "kmeans::fatal error!. invalid clustering type. " << clusteringType << endl;
      if (NGT_LOG_DEBUG_)
        (*NGT_LOG_DEBUG_)("kmeans::fatal error!. invalid clustering type. " + std::to_string(clusteringType));
      abort();
      break;
    }
  }

  // Re-assign and (in the original, now commented-out) report MSE/ML2 metrics.
  static void evaluate(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, char mode,
                       std::vector<size_t> centroidIds = std::vector<size_t>()) {
    size_t clusterSize = std::numeric_limits<size_t>::max();
    assign(vectors, clusters, clusterSize);
    // std::cout << "The number of vectors=" << vectors.size() << std::endl;
    // std::cout << "The number of centroids=" << clusters.size() << std::endl;
    if (centroidIds.size() == 0) {
      switch (mode) {
      case 'e':
        // std::cout << "MSE=" << calculateMSE(vectors, clusters) << std::endl;
        break;
      case '2':
      default:
        // std::cout << "ML2=" << calculateML2(vectors, clusters) << std::endl;
        break;
      }
    } else {
      switch (mode) {
      case 'e':
        break;
      case '2':
      default:
        // std::cout << "ML2=" << calculateML2FromSpecifiedCentroids(vectors, clusters, centroidIds)
        //           << std::endl;
        break;
      }
    }
  }

  ClusteringType clusteringType;
  InitializationMode initializationMode;
  size_t numberOfClusters;        // NOTE(review): never initialized in this class
  bool clusterSizeConstraint;     // NOTE(review): never initialized — read by kmeansWithoutNGT
  size_t maximumIteration;
  float epsilonFrom;              // epsilon sweep for NGT-based assignment
  float epsilonTo;
  float epsilonStep;
  size_t resultSizeCoefficient;   // scales the ANN result-set size per centroid
  vector<double> diffHistory;     // centroid movement per iteration (last run)
};

}  // namespace NGT
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
///
/// Stmt is the root of the statement/expression hierarchy. It is pointer-
/// aligned so derived nodes can tail-allocate pointer-sized trailing objects.
class alignas(void *) Stmt {
public:
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  void *operator new(size_t bytes) noexcept {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }

  void operator delete(void *data) noexcept {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  //===--- Statement bitfields classes ---===//
  // Each *Bitfields class below shares storage through the union at the end
  // of Stmt; every one starts by skipping the bits used by its base class
  // (NumStmtBits / NumExprBits / ...), so declaration order and widths are
  // layout-critical. Do not reorder or resize without auditing all users.

  class StmtBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class Stmt;

    /// The statement class.
    unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class NullStmtBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class NullStmt;

    unsigned : NumStmtBits;

    /// True if the null statement was preceded by an empty macro, e.g:
    /// @code
    ///   #define CALL(x)
    ///   CALL(0);
    /// @endcode
    unsigned HasLeadingEmptyMacro : 1;

    /// The location of the semi-colon.
    SourceLocation SemiLoc;
  };

  class CompoundStmtBitfields {
    friend class ASTStmtReader;
    friend class CompoundStmt;

    unsigned : NumStmtBits;

    /// The number of statements in the compound statement.
    unsigned NumStmts : 32 - NumStmtBits;

    /// The location of the opening "{".
    SourceLocation LBraceLoc;
  };

  class LabelStmtBitfields {
    friend class LabelStmt;

    unsigned : NumStmtBits;

    /// The location of the label identifier.
    SourceLocation IdentLoc;
  };

  class AttributedStmtBitfields {
    friend class ASTStmtReader;
    friend class AttributedStmt;

    unsigned : NumStmtBits;

    /// Number of attributes.
    unsigned NumAttrs : 32 - NumStmtBits;

    /// The location of the attribute.
    SourceLocation AttrLoc;
  };

  class IfStmtBitfields {
    friend class ASTStmtReader;
    friend class IfStmt;

    unsigned : NumStmtBits;

    /// True if this if statement is a constexpr if.
    unsigned IsConstexpr : 1;

    /// True if this if statement has storage for an else statement.
    unsigned HasElse : 1;

    /// True if this if statement has storage for a variable declaration.
    unsigned HasVar : 1;

    /// True if this if statement has storage for an init statement.
    unsigned HasInit : 1;

    /// The location of the "if".
    SourceLocation IfLoc;
  };

  class SwitchStmtBitfields {
    friend class SwitchStmt;

    unsigned : NumStmtBits;

    /// True if the SwitchStmt has storage for an init statement.
    unsigned HasInit : 1;

    /// True if the SwitchStmt has storage for a condition variable.
    unsigned HasVar : 1;

    /// If the SwitchStmt is a switch on an enum value, records whether all
    /// the enum values were covered by CaseStmts.  The coverage information
    /// value is meant to be a hint for possible clients.
    unsigned AllEnumCasesCovered : 1;

    /// The location of the "switch".
    SourceLocation SwitchLoc;
  };

  class WhileStmtBitfields {
    friend class ASTStmtReader;
    friend class WhileStmt;

    unsigned : NumStmtBits;

    /// True if the WhileStmt has storage for a condition variable.
    unsigned HasVar : 1;

    /// The location of the "while".
    SourceLocation WhileLoc;
  };

  class DoStmtBitfields {
    friend class DoStmt;

    unsigned : NumStmtBits;

    /// The location of the "do".
    SourceLocation DoLoc;
  };

  class ForStmtBitfields {
    friend class ForStmt;

    unsigned : NumStmtBits;

    /// The location of the "for".
    SourceLocation ForLoc;
  };

  class GotoStmtBitfields {
    friend class GotoStmt;
    friend class IndirectGotoStmt;

    unsigned : NumStmtBits;

    /// The location of the "goto".
    SourceLocation GotoLoc;
  };

  class ContinueStmtBitfields {
    friend class ContinueStmt;

    unsigned : NumStmtBits;

    /// The location of the "continue".
    SourceLocation ContinueLoc;
  };

  class BreakStmtBitfields {
    friend class BreakStmt;

    unsigned : NumStmtBits;

    /// The location of the "break".
    SourceLocation BreakLoc;
  };

  class ReturnStmtBitfields {
    friend class ReturnStmt;

    unsigned : NumStmtBits;

    /// True if this ReturnStmt has storage for an NRVO candidate.
    unsigned HasNRVOCandidate : 1;

    /// The location of the "return".
    SourceLocation RetLoc;
  };

  class SwitchCaseBitfields {
    friend class SwitchCase;
    friend class CaseStmt;

    unsigned : NumStmtBits;

    /// Used by CaseStmt to store whether it is a case statement
    /// of the form case LHS ... RHS (a GNU extension).
    unsigned CaseStmtIsGNURange : 1;

    /// The location of the "case" or "default" keyword.
    SourceLocation KeywordLoc;
  };

  //===--- Expression bitfields classes ---===//

  class ExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class AtomicExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class CallExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class CXXNewExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class DeclRefExpr; // computeDependence
    friend class DependentScopeDeclRefExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class Expr;
    friend class InitListExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class OpaqueValueExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class ShuffleVectorExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 3;
    unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
  };
  enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };

  class ConstantExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class ConstantExpr;

    unsigned : NumExprBits;

    /// The kind of result that is tail-allocated.
    unsigned ResultKind : 2;

    /// The kind of Result as defined by APValue::Kind.
    unsigned APValueKind : 4;

    /// When ResultKind == RSK_Int64, true if the tail-allocated integer is
    /// unsigned.
    unsigned IsUnsigned : 1;

    /// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated
    /// integer. 7 bits because it is the minimal number of bits to represent a
    /// value from 0 to 64 (the size of the tail-allocated integer).
    unsigned BitWidth : 7;

    /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
    /// tail-allocated APValue.
    unsigned HasCleanup : 1;

    /// True if this ConstantExpr was created for immediate invocation.
    unsigned IsImmediateInvocation : 1;
  };

  class PredefinedExprBitfields {
    friend class ASTStmtReader;
    friend class PredefinedExpr;

    unsigned : NumExprBits;

    /// The kind of this PredefinedExpr. One of the enumeration values
    /// in PredefinedExpr::IdentKind.
    unsigned Kind : 4;

    /// True if this PredefinedExpr has a trailing "StringLiteral *"
    /// for the predefined identifier.
    unsigned HasFunctionName : 1;

    /// The location of this PredefinedExpr.
    SourceLocation Loc;
  };

  class DeclRefExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class DeclRefExpr;

    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
    unsigned NonOdrUseReason : 2;

    /// The location of the declaration name itself.
    SourceLocation Loc;
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class StringLiteralBitfields {
    friend class ASTStmtReader;
    friend class StringLiteral;

    unsigned : NumExprBits;

    /// The kind of this string literal.
    /// One of the enumeration values of StringLiteral::StringKind.
    unsigned Kind : 3;

    /// The width of a single character in bytes. Only values of 1, 2,
    /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
    /// the target + string kind to the appropriate CharByteWidth.
    unsigned CharByteWidth : 3;

    unsigned IsPascal : 1;

    /// The number of concatenated token this string is made of.
    /// This is the number of trailing SourceLocation.
    unsigned NumConcatenated;
  };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    /// The kind (narrow/wide/UTF-8/16/32) of this character literal.
    unsigned Kind : 3;
  };

  class UnaryOperatorBitfields {
    friend class UnaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 5;
    unsigned CanOverflow : 1;

    /// This is only meaningful for operations on floating point
    /// types when additional values need to be in trailing storage.
    /// It is 0 otherwise.
    unsigned HasFPFeatures : 1;

    SourceLocation Loc;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 3;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class ArrayOrMatrixSubscriptExprBitfields {
    friend class ArraySubscriptExpr;
    friend class MatrixSubscriptExpr;

    unsigned : NumExprBits;

    SourceLocation RBracketLoc;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;

    /// True if the callee of the call expression was found using ADL.
    unsigned UsesADL : 1;

    /// True if the call expression has some floating-point features.
    unsigned HasFPFeatures : 1;

    /// Padding used to align OffsetToTrailingObjects to a byte multiple.
    unsigned : 24 - 3 - NumExprBits;

    /// The offset in bytes from the this pointer to the start of the
    /// trailing objects belonging to CallExpr. Intentionally byte sized
    /// for faster access.
    unsigned OffsetToTrailingObjects : 8;
  };
  enum { NumCallExprBits = 32 };

  class MemberExprBitfields {
    friend class ASTStmtReader;
    friend class MemberExpr;

    unsigned : NumExprBits;

    /// IsArrow - True if this is "X->F", false if this is "X.F".
    unsigned IsArrow : 1;

    /// True if this member expression used a nested-name-specifier to
    /// refer to the member, e.g., "x->Base::f", or found its member via
    /// a using declaration.  When true, a MemberExprNameQualifier
    /// structure is allocated immediately after the MemberExpr.
    unsigned HasQualifierOrFoundDecl : 1;

    /// True if this member expression specified a template keyword
    /// and/or a template argument list explicitly, e.g., x->f<int>,
    /// x->template f, x->template f<int>.
    /// When true, an ASTTemplateKWAndArgsInfo structure and its
    /// TemplateArguments (if any) are present.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// True if this member expression refers to a method that
    /// was resolved from an overloaded set having size greater than 1.
    unsigned HadMultipleCandidates : 1;

    /// Value of type NonOdrUseReason indicating why this MemberExpr does
    /// not constitute an odr-use of the named declaration. Meaningful only
    /// when naming a static member.
    unsigned NonOdrUseReason : 2;

    /// This is the location of the -> or . in the expression.
    SourceLocation OperatorLoc;
  };

  class CastExprBitfields {
    friend class CastExpr;
    friend class ImplicitCastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.

    /// True if the call expression has some floating-point features.
    unsigned HasFPFeatures : 1;

    /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
    /// here. ([implimits] Direct and indirect base classes [16384]).
    unsigned BasePathSize;
  };

  class BinaryOperatorBitfields {
    friend class BinaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 6;

    /// This is only meaningful for operations on floating point
    /// types when additional values need to be in trailing storage.
    /// It is 0 otherwise.
    unsigned HasFPFeatures : 1;

    SourceLocation OpLoc;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class ParenListExprBitfields {
    friend class ASTStmtReader;
    friend class ParenListExpr;

    unsigned : NumExprBits;

    /// The number of expressions in the paren list.
    unsigned NumExprs;
  };

  class GenericSelectionExprBitfields {
    friend class ASTStmtReader;
    friend class GenericSelectionExpr;

    unsigned : NumExprBits;

    /// The location of the "_Generic".
    SourceLocation GenericLoc;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class SourceLocExprBitfields {
    friend class ASTStmtReader;
    friend class SourceLocExpr;

    unsigned : NumExprBits;

    /// The kind of source location builtin represented by the SourceLocExpr.
    /// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
    unsigned Kind : 2;
  };

  class StmtExprBitfields {
    friend class ASTStmtReader;
    friend class StmtExpr;

    unsigned : NumExprBits;

    /// The number of levels of template parameters enclosing this statement
    /// expression. Used to determine if a statement expression remains
    /// dependent after instantiation.
    unsigned TemplateDepth;
  };

  //===--- C++ Expression bitfields classes ---===//

  class CXXOperatorCallExprBitfields {
    friend class ASTStmtReader;
    friend class CXXOperatorCallExpr;

    unsigned : NumCallExprBits;

    /// The kind of this overloaded operator. One of the enumerator
    /// value of OverloadedOperatorKind.
    unsigned OperatorKind : 6;
  };

  class CXXRewrittenBinaryOperatorBitfields {
    friend class ASTStmtReader;
    friend class CXXRewrittenBinaryOperator;

    unsigned : NumCallExprBits;

    unsigned IsReversed : 1;
  };

  class CXXBoolLiteralExprBitfields {
    friend class CXXBoolLiteralExpr;

    unsigned : NumExprBits;

    /// The value of the boolean literal.
    unsigned Value : 1;

    /// The location of the boolean literal.
    SourceLocation Loc;
  };

  class CXXNullPtrLiteralExprBitfields {
    friend class CXXNullPtrLiteralExpr;

    unsigned : NumExprBits;

    /// The location of the null pointer literal.
    SourceLocation Loc;
  };

  class CXXThisExprBitfields {
    friend class CXXThisExpr;

    unsigned : NumExprBits;

    /// Whether this is an implicit "this".
    unsigned IsImplicit : 1;

    /// The location of the "this".
    SourceLocation Loc;
  };

  class CXXThrowExprBitfields {
    friend class ASTStmtReader;
    friend class CXXThrowExpr;

    unsigned : NumExprBits;

    /// Whether the thrown variable (if any) is in scope.
    unsigned IsThrownVariableInScope : 1;

    /// The location of the "throw".
    SourceLocation ThrowLoc;
  };

  class CXXDefaultArgExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultArgExpr;

    unsigned : NumExprBits;

    /// The location where the default argument expression was used.
    SourceLocation Loc;
  };

  class CXXDefaultInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultInitExpr;

    unsigned : NumExprBits;

    /// The location where the default initializer expression was used.
    SourceLocation Loc;
  };

  class CXXScalarValueInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXScalarValueInitExpr;

    unsigned : NumExprBits;

    SourceLocation RParenLoc;
  };

  class CXXNewExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class CXXNewExpr;

    unsigned : NumExprBits;

    /// Was the usage ::new, i.e. is the global new to be used?
    unsigned IsGlobalNew : 1;

    /// Do we allocate an array? If so, the first trailing "Stmt *" is the
    /// size expression.
    unsigned IsArray : 1;

    /// Should the alignment be passed to the allocation function?
    unsigned ShouldPassAlignment : 1;

    /// If this is an array allocation, does the usual deallocation
    /// function for the allocated type want to know the allocated size?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// What kind of initializer do we have? Could be none, parens, or braces.
    /// In storage, we distinguish between "none, and no initializer expr", and
    /// "none, but an implicit initializer expr".
    unsigned StoredInitializationStyle : 2;

    /// True if the allocated type was expressed as a parenthesized type-id.
    unsigned IsParenTypeId : 1;

    /// The number of placement new arguments.
    unsigned NumPlacementArgs;
  };

  class CXXDeleteExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDeleteExpr;

    unsigned : NumExprBits;

    /// Is this a forced global delete, i.e. "::delete"?
    unsigned GlobalDelete : 1;

    /// Is this the array form of delete, i.e. "delete[]"?
    unsigned ArrayForm : 1;

    /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
    /// applied to pointer-to-array type (ArrayFormAsWritten will be false
    /// while ArrayForm will be true).
    unsigned ArrayFormAsWritten : 1;

    /// Does the usual deallocation function for the element type require
    /// a size_t argument?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// Location of the expression.
    SourceLocation Loc;
  };

  class TypeTraitExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class TypeTraitExpr;

    unsigned : NumExprBits;

    /// The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// The number of arguments to this type trait. According to [implimits]
    /// 8 bits would be enough, but we require (and test for) at least 16 bits
    /// to mirror FunctionType.
    unsigned NumArgs;
  };

  class DependentScopeDeclRefExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class DependentScopeDeclRefExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;
  };

  class CXXConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXConstructExpr;

    unsigned : NumExprBits;

    unsigned Elidable : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned ListInitialization : 1;
    unsigned StdInitListInitialization : 1;
    unsigned ZeroInitialization : 1;
    unsigned ConstructionKind : 3;

    SourceLocation Loc;
  };

  class ExprWithCleanupsBitfields {
    friend class ASTStmtReader; // deserialization
    friend class ExprWithCleanups;

    unsigned : NumExprBits;

    // When false, it must not have side effects.
    unsigned CleanupsHaveSideEffects : 1;

    unsigned NumObjects : 32 - 1 - NumExprBits;
  };

  class CXXUnresolvedConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXUnresolvedConstructExpr;

    unsigned : NumExprBits;

    /// The number of arguments used to construct the type.
    unsigned NumArgs;
  };

  class CXXDependentScopeMemberExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDependentScopeMemberExpr;

    unsigned : NumExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether this member expression has info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// See getFirstQualifierFoundInScope() and the comment listing
    /// the trailing objects.
    unsigned HasFirstQualifierFoundInScope : 1;

    /// The location of the '->' or '.' operator.
    SourceLocation OperatorLoc;
  };

  class OverloadExprBitfields {
    friend class ASTStmtReader;
    friend class OverloadExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// Padding used by the derived classes to store various bits. If you
    /// need to add some data here, shrink this padding and add your data
    /// above. NumOverloadExprBits also needs to be updated.
    unsigned : 32 - NumExprBits - 1;

    /// The number of results.
    unsigned NumResults;
  };
  enum { NumOverloadExprBits = NumExprBits + 1 };

  class UnresolvedLookupExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedLookupExpr;

    unsigned : NumOverloadExprBits;

    /// True if these lookup results should be extended by
    /// argument-dependent lookup if this is the operand of a function call.
    unsigned RequiresADL : 1;

    /// True if these lookup results are overloaded.  This is pretty trivially
    /// rederivable if we urgently need to kill this field.
    unsigned Overloaded : 1;
  };
  static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
                "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
                "avoid trashing OverloadExprBitfields::NumResults!");

  class UnresolvedMemberExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedMemberExpr;

    unsigned : NumOverloadExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether the lookup results contain an unresolved using declaration.
    unsigned HasUnresolvedUsing : 1;
  };
  static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
                "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
                "avoid trashing OverloadExprBitfields::NumResults!");

  class CXXNoexceptExprBitfields {
    friend class ASTStmtReader;
    friend class CXXNoexceptExpr;

    unsigned : NumExprBits;

    unsigned Value : 1;
  };

  class SubstNonTypeTemplateParmExprBitfields {
    friend class ASTStmtReader;
    friend class SubstNonTypeTemplateParmExpr;

    unsigned : NumExprBits;

    /// The location of the non-type template parameter reference.
    SourceLocation NameLoc;
  };

  class LambdaExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class LambdaExpr;

    unsigned : NumExprBits;

    /// The default capture kind, which is a value of type
    /// LambdaCaptureDefault.
    unsigned CaptureDefault : 2;

    /// Whether this lambda had an explicit parameter list vs. an
    /// implicit (and empty) parameter list.
    unsigned ExplicitParams : 1;

    /// Whether this lambda had the result type explicitly specified.
    unsigned ExplicitResultType : 1;

    /// The number of captures.
    unsigned NumCaptures : 16;
  };

  class RequiresExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class RequiresExpr;

    unsigned : NumExprBits;

    unsigned IsSatisfied : 1;
    SourceLocation RequiresKWLoc;
  };

  //===--- C++ Coroutines TS bitfields classes ---===//

  class CoawaitExprBitfields {
    friend class CoawaitExpr;

    unsigned : NumExprBits;

    unsigned IsImplicit : 1;
  };

  //===--- Obj-C Expression bitfields classes ---===//

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  //===--- Clang Extensions bitfields classes ---===//

  class OpaqueValueExprBitfields {
    friend class ASTStmtReader;
    friend class OpaqueValueExpr;

    unsigned : NumExprBits;

    /// The OVE is a unique semantic reference to its source expression if this
    /// bit is set to true.
    unsigned IsUnique : 1;

    SourceLocation Loc;
  };

  // All the per-node bitfield classes above alias this one union; the active
  // member is determined by the node's StmtClass.
  union {
    // Same order as in StmtNodes.td.
    // Statements
    StmtBitfields StmtBits;
    NullStmtBitfields NullStmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    LabelStmtBitfields LabelStmtBits;
    AttributedStmtBitfields AttributedStmtBits;
    IfStmtBitfields IfStmtBits;
    SwitchStmtBitfields SwitchStmtBits;
    WhileStmtBitfields WhileStmtBits;
    DoStmtBitfields DoStmtBits;
    ForStmtBitfields ForStmtBits;
    GotoStmtBitfields GotoStmtBits;
    ContinueStmtBitfields ContinueStmtBits;
    BreakStmtBitfields BreakStmtBits;
    ReturnStmtBitfields ReturnStmtBits;
    SwitchCaseBitfields SwitchCaseBits;

    // Expressions
    ExprBitfields ExprBits;
    ConstantExprBitfields ConstantExprBits;
    PredefinedExprBitfields PredefinedExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    StringLiteralBitfields StringLiteralBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    UnaryOperatorBitfields UnaryOperatorBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
    CallExprBitfields CallExprBits;
    MemberExprBitfields MemberExprBits;
    CastExprBitfields CastExprBits;
    BinaryOperatorBitfields BinaryOperatorBits;
    InitListExprBitfields InitListExprBits;
    ParenListExprBitfields ParenListExprBits;
    GenericSelectionExprBitfields GenericSelectionExprBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    SourceLocExprBitfields SourceLocExprBits;

    // GNU Extensions.
    StmtExprBitfields StmtExprBits;

    // C++ Expressions
    CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
    CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
    CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
    CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
    CXXThisExprBitfields CXXThisExprBits;
    CXXThrowExprBitfields CXXThrowExprBits;
    CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
    CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
    CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
    CXXNewExprBitfields CXXNewExprBits;
    CXXDeleteExprBitfields CXXDeleteExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
    CXXConstructExprBitfields CXXConstructExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
    CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
    OverloadExprBitfields OverloadExprBits;
    UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
    UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
    CXXNoexceptExprBitfields CXXNoexceptExprBits;
    SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
    LambdaExprBitfields LambdaExprBits;
    RequiresExprBitfields RequiresExprBits;

    // C++ Coroutines TS expressions
    CoawaitExprBitfields CoawaitBits;

    // Obj-C Expressions
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

    // Clang Extensions
    OpaqueValueExprBitfields OpaqueValueExprBits;
  };

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

  /// The likelihood of a branch being taken.
  enum Likelihood {
    LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute.
    LH_None,          ///< No attribute set or branches of the IfStmt have
                      ///< the same attribute.
    LH_Likely         ///< Branch has the [[likely]] attribute.
  };

protected:
  /// Iterator for iterating over Stmt * arrays that contain only T *.
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
  struct CastIterator
      : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                    std::random_access_iterator_tag, TPtr> {
    using Base = typename CastIterator::iterator_adaptor_base;

    CastIterator() : Base(nullptr) {}
    CastIterator(StmtPtr *I) : Base(I) {}

    typename Base::value_type operator*() const {
      return cast_or_null<T>(*this->I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only T *.
  template <typename T>
  using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

  using ExprIterator = CastIterator<Expr>;
  using ConstExprIterator = ConstCastIterator<Expr>;

private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt() = delete;
  Stmt(const Stmt &) = delete;
  Stmt(Stmt &&) = delete;
  Stmt &operator=(const Stmt &) = delete;
  Stmt &operator=(Stmt &&) = delete;

  Stmt(StmtClass SC) {
    // Layout guards: the bitfield union plus sClass must fit in one
    // pointer-aligned word; see the bitfield classes above.
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \returns the likelihood of a statement.
  static Likelihood getLikelihood(const Stmt *S);

  /// \returns the likelihood of the 'then' branch of an 'if' statement. The
  /// 'else' branch is required to determine whether both branches specify the
  /// same likelihood, which affects the result.
  static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else);

  /// \returns whether the likelihood of the branches of an if statement are
  /// conflicting. When the first element is \c true there's a conflict and
  /// the Attr's are the conflicting attributes of the Then and Else Stmt.
  static std::tuple<bool, const Attr *, const Attr *>
  determineLikelihoodConflict(const Stmt *Then, const Stmt *Else);

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(raw_ostream &OS, const ASTContext &Context) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// Pretty-prints in JSON format.
  void printJson(raw_ostream &Out, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, bool AddQuotes) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node.  This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  // True if this ";" was the expansion of an empty macro, e.g. "FOO;".
  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //           ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined out of line below, once CaseStmt and DefaultStmt are complete.
  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing "Stmt *" array; the RHS slot collapses away
  // when this is not a GNU range case.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DefaultStmt - Represents the "default:" label of a switch.
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

// Out-of-line so both derived classes are complete types here.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement.  For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the trailing "Stmt *" array; each optional slot collapses
  // away when its storage flag is false.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
         SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        SourceLocation LPL, SourceLocation RPL, Stmt *Then,
                        SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the trailing "Stmt *" array; each optional slot collapses
  // away when its storage flag is false.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
             SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond, SourceLocation LParenLoc,
                            SourceLocation RParenLoc);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepends SC to the singly-linked case list (cases end up in reverse
  // source order).
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  SourceLocation LParenLoc, RParenLoc;

  // Offsets into the trailing-object array. condOffset()/bodyOffset() shift
  // by one when the optional condition-variable slot is present.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL,
                           SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {} Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(SubExprs[COND]); } void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *Body) { SubExprs[BODY] = Body; } SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; } void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getDoLoc(); } SourceLocation getEndLoc() const { return getRParenLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {} Stmt *getInit() { return SubExprs[INIT]; } /// Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. 
/// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForStmtBits.ForLoc; } void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getForLoc(); } SourceLocation getEndLoc() const { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// GotoStmt - This represents a direct goto. 
class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) { setGotoLoc(GL); } /// Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {} LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const { return getLabelLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. class IndirectGotoStmt : public Stmt { SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) { setTarget(target); setGotoLoc(gotoLoc); } /// Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) {} void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr *>(Target); } const Expr *getTarget() const { return reinterpret_cast<const Expr *>(Target); } void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt *>(this)->getConstantTarget(); } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target + 1); } const_child_range children() const { return const_child_range(&Target, &Target + 1); } }; /// ContinueStmt - This represents a continue. class ContinueStmt : public Stmt { public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) { setContinueLoc(CL); } /// Build an empty continue statement. 
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {} SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; } SourceLocation getBeginLoc() const { return getContinueLoc(); } SourceLocation getEndLoc() const { return getContinueLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// BreakStmt - This represents a break. class BreakStmt : public Stmt { public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); } /// Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; } void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; } SourceLocation getBeginLoc() const { return getBreakLoc(); } SourceLocation getEndLoc() const { return getBreakLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. 
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  // A value-less 'return;' ends at the 'return' keyword itself.
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Owned by the subclass; laid out as [outputs..., inputs...].
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Subclasses provide the real locations; the base returns invalid ones.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand. All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint. Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.
  // Inputs start at index NumOutputs within Exprs.
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; }
  inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; }
  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() { return &Exprs[0]; }
  outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const { return &Exprs[0]; }
  const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; }
  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  // Non-zero only for "asm goto"; labels live after outputs and inputs in
  // the base-class Exprs array.
  unsigned NumLabels = 0;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below). An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present. This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces. If the asm string is erroneous, emit errors and return
  /// true, otherwise return false. This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  // NOTE(review): the doc above mentions a true/false result but the function
  // returns unsigned — confirm the intended return contract.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Names/Constraints store inputs after the outputs, hence the offsets.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  bool isAsmGoto() const {
    return NumLabels > 0;
  }

  unsigned getNumLabels() const {
    return NumLabels;
  }

  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  // Label expressions follow the outputs and inputs in Exprs.
  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // Braced form ("__asm { ... }") iff an '{' location was recorded.
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Constraints stores inputs after the outputs, hence the offset.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  // Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the
  // handler compound statement.
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block, &Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  // Children[TRY] is the try block; Children[HANDLER] is either a
  // SEHExceptStmt or a SEHFinallyStmt (see getExceptHandler /
  // getFinallyHandler).
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  // '__leave' is a single token, so it begins and ends at the same location.
  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variable captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. 
RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. 
using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); const_child_range children() const; }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
fwk_system.h
// system framework utils // - rlyeh, public domain. // // Note: Windows users add `/Zi` compilation flags, else add `-g` and/or `-ldl` flags // Note: If you are linking your binary using GNU ld you need to add --export-dynamic #ifndef SYSTEM_H #define SYSTEM_H int os_argc(); char* os_argv(int); const char* os_option(const char *commalist, const char *defaults); int os_optioni(const char *commalist, int defaults); float os_optionf(const char *commalist, float defaults); char* os_exec_output(); int os_exec(const char *command); #define os_exec(...) os_exec(file_normalize(stringf(__VA_ARGS__))) void tty_color(unsigned color); void tty_reset(); int cpu_cores(void); char* app_path(); void app_reload(); double time_ss(); double time_ms(); uint64_t time_human(); // YYYYMMDDhhmmss double sleep_ss(double ss); double sleep_ms(double ms); char* callstack( int traces ); // write callstack into a temporary string. do not delete it. int callstackf( FILE *fp, int traces ); // write callstack to file. <0 traces to invert order. void alert(const char *message); void hexdump( FILE *fp, const void *ptr, unsigned len, int width ); void breakpoint(const char *reason); bool has_debugger(); uint16_t lil16(uint16_t n); // swap16 as lil uint32_t lil32(uint32_t n); // swap32 as lil float lil32f(float n); // swap32 as lil uint64_t lil64(uint64_t n); // swap64 as lil double lil64f(double n); // swap64 as lil uint16_t big16(uint16_t n); // swap16 as big uint32_t big32(uint32_t n); // swap32 as big float big32f(float n); // swap32 as big uint64_t big64(uint64_t n); // swap64 as big double big64f(double n); // swap64 as big uint16_t* lil16p(void *n, int sz); uint32_t* lil32p(void *n, int sz); float* lil32pf(void *n, int sz); uint64_t* lil64p(void *n, int sz); double* lil64pf(void *n, int sz); uint16_t* big16p(void *n, int sz); uint32_t* big32p(void *n, int sz); float* big32pf(void *n, int sz); uint64_t* big64p(void *n, int sz); double* big64pf(void *n, int sz); #define alert(...) 
alert(stringf(__VA_ARGS__)) #define PANIC(...) PANIC(stringf(__VA_ARGS__), __FILE__, __LINE__) int (PRINTF)(const char *text, const char *stack, const char *file, int line, const char *function); int (PANIC)(const char *error, const char *file, int line); #endif // ----------------------------------------------------------------------------- #ifdef SYSTEM_C #pragma once #if is(gcc) // || is(clang) int __argc; char **__argv; #if !is(ems) __attribute__((constructor)) void init_argcv(int argc, char **argv) { __argc = argc; __argv = argv; } #endif #endif char *app_path() { // should return absolute path always static char buffer[1024] = {0}; if( buffer[0] ) return buffer; #if is(win32) unsigned length = GetModuleFileNameA(NULL, buffer, sizeof(buffer)); // @todo: use GetModuleFileNameW+wchar_t && convert to utf8 instead char *a = strrchr(buffer, '/'); if(!a) a = buffer + strlen(buffer); char *b = strrchr(buffer, '\\'); if(!b) b = buffer + strlen(buffer); char slash = (a < b ? *a : b < a ? *b : '/'); return snprintf(buffer, 1024, "%.*s%c", length - (int)(a < b ? 
b - a : a - b), buffer, slash), buffer; #else // #elif is(linux) char path[21] = {0}; sprintf(path, "/proc/%d/exe", getpid()); readlink(path, buffer, sizeof(buffer)); return buffer; #endif } void app_reload() { // save_on_exit(); fflush(0); chdir(app_path()); execv(__argv[0], __argv); exit(0); } char* os_exec_output() { static local char os_exec__output[4096] = {0}; return os_exec__output; } int (os_exec)( const char *cmd ) { int rc = -1; char *buf = os_exec_output(); buf[0] = 0; // memset(buf, 0, 4096); if(!strstr(cmd, ifdef(win32, ">NUL", ">/dev/null"))) puts(cmd); // log unless silent for( FILE *fp = popen( cmd, "r" ); fp; rc = pclose(fp), fp = 0) { while( fgets(buf, 4096 - 1, fp) ) { char *r = strrchr(buf, '\r'); if(r) *r = 0; char *n = strrchr(buf, '\n'); if(n) *n = 0; } } return rc; } #if is(osx) #include <execinfo.h> // backtrace, backtrace_symbols #include <dlfcn.h> // dladdr, Dl_info #elif is(gcc) && !is(ems) #include <execinfo.h> // backtrace, backtrace_symbols #elif is(win32) // && !defined __TINYC__ #include <winsock2.h> // windows.h alternative #include <dbghelp.h> #pragma comment(lib, "DbgHelp") #pragma comment(lib, "Kernel32") static int backtrace( void **addr, int maxtraces ) { static HANDLE process = 0; if( !process ) process = GetCurrentProcess(); if( !process ) exit( puts( "error: no current process" ) ); static int init = 0; if( !init ) init = SymSetOptions(SYMOPT_UNDNAME), SymInitialize( process, NULL, TRUE ); if( !init ) exit( puts( "error: cannot initialize DbgHelp.lib" ) ); typedef USHORT (WINAPI *pFN)(); // TINYC //typedef USHORT (WINAPI *pFN)(__in ULONG, __in ULONG, __out PVOID*, __out_opt PULONG); // _MSC_VER static pFN rtlCaptureStackBackTrace = 0; if( !rtlCaptureStackBackTrace ) { rtlCaptureStackBackTrace = (pFN)GetProcAddress(LoadLibraryA("kernel32.dll"), "RtlCaptureStackBackTrace"); } if( !rtlCaptureStackBackTrace ) { return 0; } return rtlCaptureStackBackTrace(1, maxtraces, (PVOID *)addr, (DWORD *) 0); } static char 
**backtrace_symbols(void *const *array,int size) { HANDLE process = GetCurrentProcess(); enum { MAXSYMBOLNAME = 512 - sizeof(IMAGEHLP_SYMBOL64) }; char symbol64_buf [ 512 ]; char symbol64_bufblank[ 512 ] = {0}; IMAGEHLP_SYMBOL64 *symbol64 = (IMAGEHLP_SYMBOL64*)symbol64_buf; IMAGEHLP_SYMBOL64 *symbol64_blank = (IMAGEHLP_SYMBOL64*)symbol64_bufblank; symbol64_blank->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64); symbol64_blank->MaxNameLength = (MAXSYMBOLNAME-1) / 2; //wchar? int symlen = size * sizeof(char *); char **symbols = (char **)SYS_REALLOC(0, symlen); if( symbols ) { for( int i = 0; i < size; ++i ) { symbols[ i ] = NULL; } char begin[1024]; for( int i = 0; i < size; ++i ) { char **symbuf, *buffer = begin; DWORD64 dw1 = 0, dw2 = 0; *symbol64 = *symbol64_blank; if( SymGetSymFromAddr64( process, (DWORD64)array[i], &dw1, symbol64 ) ) { IMAGEHLP_LINE64 l64 = {0}; l64.SizeOfStruct = sizeof(IMAGEHLP_LINE64); if( SymGetLineFromAddr64( process, (DWORD64)array[i], (DWORD*)&dw2, &l64 ) ) { // int lenbase( const char *str ); int base = 0; // lenbase( l64.FileName ); buffer += sprintf(buffer,"%s (%s:%d)%c", symbol64->Name, &l64.FileName[ base ], l64.LineNumber, 0); } else { buffer += sprintf(buffer,"%s (??)%c", symbol64->Name, 0); } } else buffer += sprintf(buffer,"(??)%c", 0); size_t buflen = buffer - begin + 1; symbuf = (char **)SYS_REALLOC( symbols, symlen + buflen ); if( symbuf ) { memcpy( (char *)symbuf + symlen, begin, buflen ); symbuf[ i ] = (char *)(size_t)symlen; symbols = symbuf; symlen += buflen; } else break; } for( int i = 0; i < size; ++i ) { symbols[ i ] = (char *)symbols + (size_t)symbols[ i ]; } } return symbols; } #else static int backtrace(void **heap, int num) { return 0; } static char **backtrace_symbols(void *const *sym,int num) { return 0; } #endif void trace_cb( int traces, int (*yield)(const char *)) { enum { skip = 1 }; /* exclude 1 trace from stack (this function) */ enum { maxtraces = 128 }; int inc = 1; if( traces < 0 ) traces = -traces, inc = -1; 
if( traces == 0 ) return; if( traces > maxtraces ) traces = maxtraces; void *stack[ maxtraces ]; traces = backtrace( stack, traces ); char **symbols = backtrace_symbols( stack, traces ); char demangled[1024] = "??", buf[1024]; int L = 0, B = inc>0 ? skip - 1 : traces, E = inc>0 ? traces : skip - 1; for( int i = B; ( i += inc ) != E; ) { #if is(linux) char *address = strstr( symbols[i], "[" ) + 1; address[strlen(address) - 1] = '\0'; char *binary = symbols[i]; strstr( symbols[i], "(" )[0] = '\0'; char command[1024]; sprintf(command, "addr2line -e %s %s", binary, address); for( FILE *fp = popen( command, "r" ); fp ; pclose(fp), fp = 0 ) { fgets(demangled, sizeof(demangled), fp); int len = strlen(demangled); while( len > 0 && demangled[len-1] < 32 ) demangled[--len] = 0; } symbols[i] = demangled; #elif is(osx) struct Dl_info info; if( dladdr(stack[i], &info) && info.dli_sname ) { char *dmgbuf = info.dli_sname[0] != '_' ? NULL : __cxa_demangle(info.dli_sname, NULL, 0, NULL); strcpy( demangled, dmgbuf ? 
dmgbuf : info.dli_sname ); symbols[i] = demangled; FREE( dmgbuf ); } #endif sprintf(buf, "%03d: %#016p %s", ++L, stack[i], symbols[i]); //sprintf(buf, "%03d: %s", ++L, symbols[i]); if( yield(buf) < 0 ) break; } SYS_REALLOC( symbols, 0 ); } static local char *trace_strbuf[128] = {0}; static local int trace_counter = 0, trace_len = 0; int trace_(const char *line) { int len = strlen(line); trace_len += len + 1; trace_strbuf[trace_counter] = (char*)SYS_REALLOC(trace_strbuf[trace_counter], (len * 1.5)); strcpy(trace_strbuf[trace_counter], line ); trace_counter = (trace_counter +1) % 128; return 1; } char *callstack( int traces ) { #if is(linux) return ""; // @fixme: not really working as expected #else //if( trace_ ) trace_str_ = SYS_REALLOC(trace_str_, trace_lenbuf_ = 0); trace_counter = trace_len = 0; trace_cb( traces, trace_ ); static local char *buf = 0; // @fixme: 1 leak per invoking thread SYS_REALLOC(buf, 0); buf = (char*)SYS_REALLOC( 0, trace_len + 1 ); buf[0] = 0; for( int i = 0; i < trace_counter; ++i ) { strcat(buf, trace_strbuf[i] ); // <-- optimize strcat(buf, "\n"); } return buf ? buf : ""; // @fixme: should return NULL if no callstack is retrieved? 
#endif } int callstackf( FILE *fp, int traces ) { char *buf = callstack(traces); fputs(buf, fp); return 0; } // ----------------------------------------------------------------------------- // endian #if is(vc) #include <stdlib.h> #define swap16 _byteswap_ushort #define swap32 _byteswap_ulong #define swap64 _byteswap_uint64 #elif is(gcc) #define swap16 __builtin_bswap16 #define swap32 __builtin_bswap32 #define swap64 __builtin_bswap64 #else uint16_t swap16( uint16_t x ) { return (x << 8) | (x >> 8); } uint32_t swap32( uint32_t x ) { x = ((x << 8) & 0xff00ff00) | ((x >> 8) & 0x00ff00ff); return (x << 16) | (x >> 16); } uint64_t swap64( uint64_t x ) { x = ((x << 8) & 0xff00ff00ff00ff00ULL) | ((x >> 8) & 0x00ff00ff00ff00ffULL); x = ((x << 16) & 0xffff0000ffff0000ULL) | ((x >> 16) & 0x0000ffff0000ffffULL); return (x << 32) | (x >> 32); } #endif float swap32f(float n) { union { float t; uint32_t i; } conv; conv.t = n; conv.i = swap32(conv.i); return conv.t; } double swap64f(double n) { union { double t; uint64_t i; } conv; conv.t = n; conv.i = swap64(conv.i); return conv.t; } #define is_big() ((*(uint16_t *)"\0\1") == 1) #define is_little() ((*(uint16_t *)"\0\1") != 1) uint16_t lil16(uint16_t n) { return is_big() ? swap16(n) : n; } uint32_t lil32(uint32_t n) { return is_big() ? swap32(n) : n; } uint64_t lil64(uint64_t n) { return is_big() ? swap64(n) : n; } uint16_t big16(uint16_t n) { return is_little() ? swap16(n) : n; } uint32_t big32(uint32_t n) { return is_little() ? swap32(n) : n; } uint64_t big64(uint64_t n) { return is_little() ? swap64(n) : n; } float lil32f(float n) { return is_big() ? swap32f(n) : n; } double lil64f(double n) { return is_big() ? swap64f(n) : n; } float big32f(float n) { return is_little() ? swap32f(n) : n; } double big64f(double n) { return is_little() ? 
swap64f(n) : n; } uint16_t* lil16p(void *p, int sz) { if(is_big() ) { uint16_t *n = (uint16_t *)p; for(int i = 0; i < sz; ++i) n[i] = swap16(n[i]); } return p; } uint16_t* big16p(void *p, int sz) { if(is_little()) { uint16_t *n = (uint16_t *)p; for(int i = 0; i < sz; ++i) n[i] = swap16(n[i]); } return p; } uint32_t* lil32p(void *p, int sz) { if(is_big() ) { uint32_t *n = (uint32_t *)p; for(int i = 0; i < sz; ++i) n[i] = swap32(n[i]); } return p; } uint32_t* big32p(void *p, int sz) { if(is_little()) { uint32_t *n = (uint32_t *)p; for(int i = 0; i < sz; ++i) n[i] = swap32(n[i]); } return p; } uint64_t* lil64p(void *p, int sz) { if(is_big() ) { uint64_t *n = (uint64_t *)p; for(int i = 0; i < sz; ++i) n[i] = swap64(n[i]); } return p; } uint64_t* big64p(void *p, int sz) { if(is_little()) { uint64_t *n = (uint64_t *)p; for(int i = 0; i < sz; ++i) n[i] = swap64(n[i]); } return p; } float * lil32pf(void *p, int sz) { if(is_big() ) { float *n = (float *)p; for(int i = 0; i < sz; ++i) n[i] = swap32f(n[i]); } return p; } float * big32pf(void *p, int sz) { if(is_little()) { float *n = (float *)p; for(int i = 0; i < sz; ++i) n[i] = swap32f(n[i]); } return p; } double * lil64pf(void *p, int sz) { if(is_big() ) { double *n = (double *)p; for(int i = 0; i < sz; ++i) n[i] = swap64f(n[i]); } return p; } double * big64pf(void *p, int sz) { if(is_little()) { double *n = (double *)p; for(int i = 0; i < sz; ++i) n[i] = swap64f(n[i]); } return p; } // ----------------------------------------------------------------------------- // cpu int cpu_cores(void) { #if is(win32) DWORD_PTR pm, sm; if( GetProcessAffinityMask(GetCurrentProcess(), &pm, &sm) ) if( pm ) { int count = 0; while( pm ) { ++count; pm &= pm - 1; } return count; } { SYSTEM_INFO si; GetSystemInfo(&si); return (int)si.dwNumberOfProcessors; } #else // unix int count = sysconf(_SC_NPROCESSORS_ONLN); return count > 0 ? 
count : 1; #endif #if 0 #elif is(linux) cpu_set_t prevmask, testmask; CPU_ZERO(&prevmask); CPU_ZERO(&testmask); sched_getaffinity(0, sizeof(prevmask), &prevmask); //Get current mask sched_setaffinity(0, sizeof(testmask), &testmask); //Set zero mask sched_getaffinity(0, sizeof(testmask), &testmask); //Get mask for all CPUs sched_setaffinity(0, sizeof(prevmask), &prevmask); //Reset current mask int num = CPU_COUNT(&testmask); return (num > 1 ? num : 1); #elif is(cpp) return (int)std::thread::hardware_concurrency(); #elif defined(_OPENMP) // omp int cores = 0; #pragma omp parallel { #pragma omp atomic ++cores; } return cores; #endif } // ---------------------------------------------------------------------------- // time double time_ss() { return glfwGetTime(); } double time_ms() { return glfwGetTime() * 1000.0; } uint64_t time_human() { time_t mtime = time(0); struct tm *ti = localtime(&mtime); return atoi64(stringf("%04d%02d%02d%02d%02d%02d",ti->tm_year+1900,ti->tm_mon+1,ti->tm_mday,ti->tm_hour,ti->tm_min,ti->tm_sec)); } #if 0 uint64_t time_gpu() { GLint64 t = 123456789; glGetInteger64v(GL_TIMESTAMP, &t); return (uint64_t)t; } #endif double sleep_ms(double ms) { double now = time_ms(); if( ms <= 0 ) { #if is(win32) Sleep(0); // yield #else usleep(0); #endif } else { #if is(win32) Sleep(ms); #else usleep(ms * 1000); #endif } return time_ms() - now; } double sleep_ss(double ss) { return sleep_ms( ss * 1000 ) / 1000.0; } // ---------------------------------------------------------------------------- // argc/v int os_argc() { return __argc; } char* os_argv(int arg) { return __argv[arg]; } // ---------------------------------------------------------------------------- // options const char *os_option(const char *commalist, const char *defaults) { while( commalist[0] ) { const char *begin = commalist; while(*commalist != ',' && *commalist != '\0') ++commalist; const char *end = commalist; char token[128]; snprintf(token, 128, "%.*s", (int)(end - begin), begin); for( int i 
= 1; i < os_argc(); ++i ) { char *arg = os_argv(i); if( !strcmpi( arg, token ) ) { if( (i+1) < os_argc() ) { return os_argv(i+1); } } } commalist = end + 1; } return defaults; } int os_optioni(const char *commalist, int defaults) { const char *rc = os_option(commalist, 0); return rc ? atoi(rc) : defaults; } float os_optionf(const char *commalist, float defaults) { const char *rc = os_option(commalist, 0); return rc ? atof(rc) : defaults; } // ---------------------------------------------------------------------------- // tty void tty_color(unsigned color) { #if is(win32) do_once { unsigned mode = 0; SetConsoleMode(GetStdHandle(-11), (GetConsoleMode(GetStdHandle(-11), &mode), mode|4)); } #endif if( color ) { // if( color == RED ) breakpoint("break on RED"); // debug unsigned r = (color >> 16) & 255; unsigned g = (color >> 8) & 255; unsigned b = (color >> 0) & 255; // 24-bit console ESC[ … 38;2;<r>;<g>;<b> … m Select RGB foreground color // 256-color console ESC[38;5;<fgcode>m // 0x00-0x07: standard colors (as in ESC [ 30..37 m) // 0x08-0x0F: high intensity colors (as in ESC [ 90..97 m) // 0x10-0xE7: 6*6*6=216 colors: 16 + 36*r + 6*g + b (0≤r,g,b≤5) // 0xE8-0xFF: grayscale from black to white in 24 steps r /= 51, g /= 51, b /= 51; // [0..5] printf("\033[38;5;%dm", r*36+g*6+b+16); // "\033[0;3%sm", color_code); } else { printf("%s", "\x1B[39;49m"); // reset } } void tty_puts(unsigned color, const char *text) { tty_color(color); puts(text); } void tty_init() { tty_color(0); } int tty_cols() { #if is(win32) CONSOLE_SCREEN_BUFFER_INFO c; if( GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &c) ) { int w = c.srWindow.Right-c.srWindow.Left-c.dwCursorPosition.X; return w; } #endif #if is(linux) #ifdef TIOCGSIZE struct ttysize ts; ioctl(STDIN_FILENO, TIOCGSIZE, &ts); return ts.ts_cols - 1; #endif #ifdef TIOCGWINSZ struct winsize ts; ioctl(STDIN_FILENO, TIOCGWINSZ, &ts); return ts.ws_col - 1; #endif #endif return 80; } // 
----------------------------------------------------------------------------- // debugger #include <stdio.h> void hexdump( FILE *fp, const void *ptr, unsigned len, int width ) { unsigned char *data = (unsigned char*)ptr; for( unsigned jt = 0; jt < len; jt += width ) { fprintf( fp, "; %05d ", jt ); for( unsigned it = jt, next = it + width; it < len && it < next; ++it ) { fprintf( fp, "%02x %s", (unsigned char)data[it], &" \n\0...\n"[ (1+it) < len ? 2 * !!((1+it) % width) : 3 ] ); } fprintf( fp, "; %05d ", jt ); for( unsigned it = jt, next = it + width; it < len && it < next; ++it ) { fprintf( fp, " %c %s", (signed char)data[it] >= 32 ? (signed char)data[it] : (signed char)'.', &" \n\0...\n"[ (1+it) < len ? 2 * !!((1+it) % width) : 3 ] ); } } } #if is(vc) static void debugbreak(void) { do { \ __try { DebugBreak(); } \ __except (GetExceptionCode() == EXCEPTION_BREAKPOINT ? \ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {} \ } while(0); } #else // is(linux) static int is_debugger_present = -1; static void _sigtrap_handler(int signum) { is_debugger_present = 0; signal(SIGTRAP, SIG_DFL); } static void debugbreak(void) { // break if debugger present // __builtin_trap(); // //raise(SIGABRT); // SIGTRAP); //__asm__ volatile("int $0x03"); if( is_debugger_present < 0 ) { is_debugger_present = 1; signal(SIGTRAP, _sigtrap_handler); raise(SIGTRAP); } } #endif void (alert)(const char *message) { // @todo: move to app_, besides die() #if is(win32) MessageBoxA(0, message, 0,0); #else for(FILE *fp = fopen("/tmp/fwk.warning","wb");fp;fp=0) fputs(message,fp), fclose(fp), system("xmessage -center -file /tmp/fwk.warning"); #endif } void breakpoint(const char *reason) { window_visible(false); char *fulltext = reason[0] == '!' ? 
stringf("%s\n%s", reason+1, callstack(+48)) : reason; PRINTF("%s", fulltext); (alert)(fulltext); debugbreak(); window_visible(true); } bool has_debugger() { #if is(win32) return IsDebuggerPresent(); // SetLastError(123); OutputDebugStringA("\1"); enabled = GetLastError() != 123; #else return false; #endif } // ---------------------------------------------------------------------------- // logger unsigned determinate_color_from_text(const char *text) { /**/ if( strstri(text, "fail") || strstri(text, "error") ) return RED; else if( strstri(text, "warn") || strstri(text, "not found") ) return YELLOW; return 0; } int (PRINTF)(const char *text, const char *stack, const char *file, int line, const char *function) { // static thread_mutex_t lock, *init = 0; if(!init) thread_mutex_init(init = &lock); // thread_mutex_lock( &lock ); tty_color(/*errno ? RED :*/ determinate_color_from_text(text)); #if is(vc) char *slash = strrchr(file, '\\'); if(slash) file = slash + 1; #endif char *location = stringf("|%s|%s:%d", /*errno?strerror(errno):*/function, file, line); // errno = 0; printf("\r%*.s%s", tty_cols() + 1 - (int)strlen(location), "", location); printf("\r%07.3fs|%s%s", time_ss(), text, stack); tty_color(0); // thread_mutex_unlock( &lock ); return 1; } // ---------------------------------------------------------------------------- // panic static void *panic_oom_reserve; // for out-of-memory recovery int (PANIC)(const char *error, const char *file, int line) { panic_oom_reserve = SYS_REALLOC(panic_oom_reserve, 0); tty_color(RED); error += error[0] == '!'; fprintf(stderr, "Error: %s (%s:%d) (errno:%s)\n", error, file, line, strerror(errno)); fprintf(stderr, "%s", callstack(+16)); // no \n fflush(0); // fflush(stderr); tty_color(0); breakpoint(error); exit(-line); return 1; } #endif
myMultiFactorial.c
#include <omp.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Computes N! with an OpenMP parallel product.
 *
 * Fixes over the original:
 *  - the hand-rolled per-thread partial-product array (VLA indexed by
 *    omp_get_thread_num()) plus serial merge loop is replaced with the
 *    idiomatic `reduction(*:...)` clause — same result, less code, and no
 *    reliance on the team size matching omp_get_max_threads();
 *  - the misleading comments are corrected (the merge loop started at 0,
 *    not 1 as its comment claimed);
 *  - the "DEBUG NUM THREADS" printf is newline-terminated so it can no
 *    longer be garbled mid-line by the parallel progress printfs.
 *
 * Note: the result is a plain int, so it overflows for N > 12.
 */

/* Product of the factors 2..N, i.e. N! (the factor 1 is skipped since it
 * contributes nothing to the product). Runs in parallel when compiled with
 * OpenMP; degrades to a correct sequential loop otherwise. */
static int parallel_factorial(int N) {
    int ans = 1;
    int i;
    #pragma omp parallel for reduction(*:ans)
    for (i = 1; i < N; i++) {          /* factors (i+1) = 2 .. N */
        ans *= (i + 1);
        printf("Calculando entrada %d\n", i);
        /* sleep(1); */                /* kept from original, for demos */
    }
    return ans;
}

int main(int argc, char *argv[]) {
    int N = 7;                         /* computes 7! = 5040 */

    printf("DEBUG NUM THREADS %d\n", omp_get_max_threads());

    int ans = parallel_factorial(N);

    printf("Integrating answer");      /* original wording/format kept */
    printf("\nEl resultado es! %d\n", ans);
    return 0;
}
timer.h
#pragma once #include <iostream> #include <chrono> #include <time.h> #include <sys/time.h> #include "config.h" #include "utils.h" using namespace std; using namespace std::chrono; /* ----------------------------- timer resource ------------------------------------ */ #define SEC_PER_DAY 86400 #define SEC_PER_HOUR 3600 #define SEC_PER_MIN 60 inline void get_wallclock_time() { struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); // Form the seconds of the day long hms = tv.tv_sec % SEC_PER_DAY; hms += tz.tz_dsttime * SEC_PER_HOUR; hms -= tz.tz_minuteswest * SEC_PER_MIN; // mod `hms` to insure in positive range of [0...SEC_PER_DAY) hms = (hms + SEC_PER_DAY) % SEC_PER_DAY; // Tear apart hms into h:m:s int hour = hms / SEC_PER_HOUR; int min = (hms % SEC_PER_HOUR) / SEC_PER_MIN; int sec = (hms % SEC_PER_HOUR) % SEC_PER_MIN; // or hms % SEC_PER_MIN // printf("Current local time: %d:%02d:%02d\n", hour, min, sec); } class Timer { private: high_resolution_clock::time_point start_time; high_resolution_clock::time_point stop_time; public: Timer() { } void tic() { get_wallclock_time(); start_time = high_resolution_clock::now(); } float toc() { get_wallclock_time(); stop_time = high_resolution_clock::now(); float elapsed_time = duration_cast<microseconds>(stop_time - start_time).count() / 1000000.0; return elapsed_time; } float print_toc() { get_wallclock_time(); stop_time = high_resolution_clock::now(); float elapsed_time = duration_cast<microseconds>(stop_time - start_time).count() / 1000000.0; cout << "Elapsed time: " << elapsed_time << " s = " << elapsed_time/3600.0 << " hr" << endl; return elapsed_time; } }; class TQDM { Timer timer; int iter = 0; int total = 0; int step_size = 1; float speed = 0; float percentage = 0; float eta = 0; public: TQDM(int _total = 0, int _step_size = 1) { init(_total, _step_size); } void init(int _total = 0, int _step_size = 1) { iter = 0; total = _total; step_size = _step_size; timer.tic(); } void step() { #pragma omp critical { 
iter++; if(iter%step_size == 0) { speed = (float)iter / timer.toc(); percentage = ((total > 0) ? iter*100.0/total : 0.0); eta = max((float)0.0, (total-iter)/speed); LOG(fixed << iter << "/" << total << " (" << setprecision(1) << percentage << "%) iterations done at " << setprecision(2) << speed << " it/sec eta : " << setprecision(1) << eta << " s"); } if(iter == total) finish(); } } void finish() { speed = (float)iter / timer.toc(); percentage = ((total > 0.0) ? iter*100.0/total : 100.0); LOGN(fixed << iter << "/" << total << " (" << setprecision(0) << percentage << "%) iterations done in " << setprecision(1) << timer.toc() << " seconds at " << setprecision(2) << speed << " it/sec"); } };